From: "Arisu Tachibana" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.6 commit in: /
Date: Mon, 20 Oct 2025 05:30:48 +0000 (UTC)
Message-ID: <1760938232.ed1be0b34ccfde8f1ca4d17fbf44edbde97175f4.alicef@gentoo>

commit:     ed1be0b34ccfde8f1ca4d17fbf44edbde97175f4
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Oct 20 05:30:32 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Oct 20 05:30:32 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ed1be0b3

Linux patch 6.6.113

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1112_linux-6.6.113.patch | 7576 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7580 insertions(+)

diff --git a/0000_README b/0000_README
index 7e85278a..e684251a 100644
--- a/0000_README
+++ b/0000_README
@@ -491,6 +491,10 @@ Patch:  1111_linux-6.6.112.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.6.112
 
+Patch:  1112_linux-6.6.113.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.6.113
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1112_linux-6.6.113.patch b/1112_linux-6.6.113.patch
new file mode 100644
index 00000000..48d54ba4
--- /dev/null
+++ b/1112_linux-6.6.113.patch
@@ -0,0 +1,7576 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 60d48ebbc2cb00..fff3ca50c6c26c 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5638,6 +5638,9 @@
+ 
+ 	rootflags=	[KNL] Set root filesystem mount option string
+ 
++	initramfs_options= [KNL]
++                        Specify mount options for the initramfs mount.
++
+ 	rootfstype=	[KNL] Set root filesystem type
+ 
+ 	rootwait	[KNL] Wait (indefinitely) for root device to show up.
+diff --git a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
+index 5ac994b3c0aa15..b304bc5a08c402 100644
+--- a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
++++ b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
+@@ -57,11 +57,24 @@ required:
+   - clocks
+   - clock-names
+   - '#phy-cells'
+-  - power-domains
+   - resets
+   - reset-names
+   - rockchip,grf
+ 
++allOf:
++  - if:
++      properties:
++        compatible:
++          contains:
++            enum:
++              - rockchip,px30-csi-dphy
++              - rockchip,rk1808-csi-dphy
++              - rockchip,rk3326-csi-dphy
++              - rockchip,rk3368-csi-dphy
++    then:
++      required:
++        - power-domains
++
+ additionalProperties: false
+ 
+ examples:
+diff --git a/Makefile b/Makefile
+index 64d76baa0e0c9b..ab277ff8764317 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 112
++SUBLEVEL = 113
+ EXTRAVERSION =
+ NAME = Pinguïn Aangedreven
+ 
+diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
+index c907478be196ed..4abb86dc98fdac 100644
+--- a/arch/arm/mach-omap2/pm33xx-core.c
++++ b/arch/arm/mach-omap2/pm33xx-core.c
+@@ -388,12 +388,15 @@ static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
+ 		if (!state_node)
+ 			break;
+ 
+-		if (!of_device_is_available(state_node))
++		if (!of_device_is_available(state_node)) {
++			of_node_put(state_node);
+ 			continue;
++		}
+ 
+ 		if (i == CPUIDLE_STATE_MAX) {
+ 			pr_warn("%s: cpuidle states reached max possible\n",
+ 				__func__);
++			of_node_put(state_node);
+ 			break;
+ 		}
+ 
+@@ -403,6 +406,7 @@ static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
+ 			states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 |
+ 							 WFI_FLAG_FLUSH_CACHE;
+ 
++		of_node_put(state_node);
+ 		state_count++;
+ 	}
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 6f5f96853ba1c2..701e139a9cbd06 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1529,6 +1529,8 @@ mdss: display-subsystem@1a00000 {
+ 
+ 			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ 
++			resets = <&gcc GCC_MDSS_BCR>;
++
+ 			interrupt-controller;
+ 			#interrupt-cells = <1>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index c844e01f9aa15b..86432ed25b23a1 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -1210,6 +1210,8 @@ mdss: display-subsystem@1a00000 {
+ 
+ 			power-domains = <&gcc MDSS_GDSC>;
+ 
++			resets = <&gcc GCC_MDSS_BCR>;
++
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			#interrupt-cells = <1>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 64ea9d73d970a9..fa91e2036dd56f 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -5208,11 +5208,11 @@ slimbam: dma-controller@17184000 {
+ 			compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
+ 			qcom,controlled-remotely;
+ 			reg = <0 0x17184000 0 0x2a000>;
+-			num-channels = <31>;
++			num-channels = <23>;
+ 			interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ 			#dma-cells = <1>;
+ 			qcom,ee = <1>;
+-			qcom,num-ees = <2>;
++			qcom,num-ees = <4>;
+ 			iommus = <&apps_smmu 0x1806 0x0>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+index 1497f7c8adfaf4..dfdc6e659cc464 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+@@ -185,7 +185,7 @@ secure_proxy_sa3: mailbox@43600000 {
+ 
+ 	main_pmx0: pinctrl@f4000 {
+ 		compatible = "pinctrl-single";
+-		reg = <0x00 0xf4000 0x00 0x2ac>;
++		reg = <0x00 0xf4000 0x00 0x25c>;
+ 		#pinctrl-cells = <1>;
+ 		pinctrl-single,register-width = <32>;
+ 		pinctrl-single,function-mask = <0xffffffff>;
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 2ce9ef9d924aac..4561c4a670afbd 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2174,17 +2174,21 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
+ #ifdef CONFIG_ARM64_MTE
+ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ {
++	static bool cleared_zero_page = false;
++
+ 	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
+ 
+ 	mte_cpu_setup();
+ 
+ 	/*
+ 	 * Clear the tags in the zero page. This needs to be done via the
+-	 * linear map which has the Tagged attribute.
++	 * linear map which has the Tagged attribute. Since this page is
++	 * always mapped as pte_special(), set_pte_at() will not attempt to
++	 * clear the tags or set PG_mte_tagged.
+ 	 */
+-	if (try_page_mte_tagging(ZERO_PAGE(0))) {
++	if (!cleared_zero_page) {
++		cleared_zero_page = true;
+ 		mte_clear_page_tags(lm_alias(empty_zero_page));
+-		set_page_mte_tagged(ZERO_PAGE(0));
+ 	}
+ 
+ 	kasan_init_hw_tags_cpu();
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 4edecaac8f919a..84ea9c50c076f0 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -428,7 +428,8 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
+ 			put_page(page);
+ 			break;
+ 		}
+-		WARN_ON_ONCE(!page_mte_tagged(page));
++
++		WARN_ON_ONCE(!page_mte_tagged(page) && !is_zero_page(page));
+ 
+ 		/* limit access to the end of the page */
+ 		offset = offset_in_page(addr);
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 70b91a8c6bb3f3..c0942cce3b6878 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -131,9 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ 
+ void *alloc_insn_page(void)
+ {
+-	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
++	void *addr;
++
++	addr = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+ 			GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
+ 			NUMA_NO_NODE, __builtin_return_address(0));
++	if (!addr)
++		return NULL;
++	set_memory_rox((unsigned long)addr, 1);
++	return addr;
+ }
+ 
+ /* arm kprobe: install breakpoint in text */
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index 655dc2b1616f2c..31be0d00976e22 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -363,10 +363,9 @@ void __init platform_init(void)
+ 	arch_reserve_vmcore();
+ 	arch_parse_crashkernel();
+ 
+-#ifdef CONFIG_ACPI_TABLE_UPGRADE
+-	acpi_table_upgrade();
+-#endif
+ #ifdef CONFIG_ACPI
++	acpi_table_upgrade();
++	acpi_gbl_use_global_lock = false;
+ 	acpi_gbl_use_default_register_widths = false;
+ 	acpi_boot_table_init();
+ #endif
+diff --git a/arch/parisc/include/uapi/asm/ioctls.h b/arch/parisc/include/uapi/asm/ioctls.h
+index 82d1148c6379a5..74b4027a4e8083 100644
+--- a/arch/parisc/include/uapi/asm/ioctls.h
++++ b/arch/parisc/include/uapi/asm/ioctls.h
+@@ -10,10 +10,10 @@
+ #define TCSETS		_IOW('T', 17, struct termios) /* TCSETATTR */
+ #define TCSETSW		_IOW('T', 18, struct termios) /* TCSETATTRD */
+ #define TCSETSF		_IOW('T', 19, struct termios) /* TCSETATTRF */
+-#define TCGETA		_IOR('T', 1, struct termio)
+-#define TCSETA		_IOW('T', 2, struct termio)
+-#define TCSETAW		_IOW('T', 3, struct termio)
+-#define TCSETAF		_IOW('T', 4, struct termio)
++#define TCGETA          0x40125401
++#define TCSETA          0x80125402
++#define TCSETAW         0x80125403
++#define TCSETAF         0x80125404
+ #define TCSBRK		_IO('T', 5)
+ #define TCXONC		_IO('T', 6)
+ #define TCFLSH		_IO('T', 7)
+diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
+index 69d65ffab31263..03165c82dfdbd9 100644
+--- a/arch/parisc/lib/memcpy.c
++++ b/arch/parisc/lib/memcpy.c
+@@ -41,7 +41,6 @@ unsigned long raw_copy_from_user(void *dst, const void __user *src,
+ 	mtsp(get_kernel_space(), SR_TEMP2);
+ 
+ 	/* Check region is user accessible */
+-	if (start)
+ 	while (start < end) {
+ 		if (!prober_user(SR_TEMP1, start)) {
+ 			newlen = (start - (unsigned long) src);
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 28fac477007316..5337ad8a2e340f 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -1895,7 +1895,7 @@ static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ 	return 0;
+ 
+ out:
+-	irq_domain_free_irqs_parent(domain, virq, i - 1);
++	irq_domain_free_irqs_parent(domain, virq, i);
+ 	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs);
+ 	return ret;
+ }
+diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
+index fdcf10cd4d1270..b53f817fc421f8 100644
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -587,7 +587,7 @@ static int pseries_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+ 
+ out:
+ 	/* TODO: handle RTAS cleanup in ->msi_finish() ? */
+-	irq_domain_free_irqs_parent(domain, virq, i - 1);
++	irq_domain_free_irqs_parent(domain, virq, i);
+ 	return ret;
+ }
+ 
+diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
+deleted file mode 100644
+index 7822ea92e54afd..00000000000000
+--- a/arch/s390/net/bpf_jit.h
++++ /dev/null
+@@ -1,55 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * BPF Jit compiler defines
+- *
+- * Copyright IBM Corp. 2012,2015
+- *
+- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+- *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
+- */
+-
+-#ifndef __ARCH_S390_NET_BPF_JIT_H
+-#define __ARCH_S390_NET_BPF_JIT_H
+-
+-#ifndef __ASSEMBLY__
+-
+-#include <linux/filter.h>
+-#include <linux/types.h>
+-
+-#endif /* __ASSEMBLY__ */
+-
+-/*
+- * Stackframe layout (packed stack):
+- *
+- *				    ^ high
+- *	      +---------------+     |
+- *	      | old backchain |     |
+- *	      +---------------+     |
+- *	      |   r15 - r6    |     |
+- *	      +---------------+     |
+- *	      | 4 byte align  |     |
+- *	      | tail_call_cnt |     |
+- * BFP	   -> +===============+     |
+- *	      |		      |     |
+- *	      |   BPF stack   |     |
+- *	      |		      |     |
+- * R15+160 -> +---------------+     |
+- *	      | new backchain |     |
+- * R15+152 -> +---------------+     |
+- *	      | + 152 byte SA |     |
+- * R15	   -> +---------------+     + low
+- *
+- * We get 160 bytes stack space from calling function, but only use
+- * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
+- *
+- * The stack size used by the BPF program ("BPF stack" above) is passed
+- * via "aux->stack_depth".
+- */
+-#define STK_SPACE_ADD	(160)
+-#define STK_160_UNUSED	(160 - 12 * 8)
+-#define STK_OFF		(STK_SPACE_ADD - STK_160_UNUSED)
+-
+-#define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
+-#define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
+-
+-#endif /* __ARCH_S390_NET_BPF_JIT_H */
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 2d8facfd4e4252..5a64d34a37482b 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -31,11 +31,10 @@
+ #include <asm/nospec-branch.h>
+ #include <asm/set_memory.h>
+ #include <asm/text-patching.h>
+-#include "bpf_jit.h"
+ 
+ struct bpf_jit {
+ 	u32 seen;		/* Flags to remember seen eBPF instructions */
+-	u32 seen_reg[16];	/* Array to remember which registers are used */
++	u16 seen_regs;		/* Mask to remember which registers are used */
+ 	u32 *addrs;		/* Array with relative instruction addresses */
+ 	u8 *prg_buf;		/* Start of program */
+ 	int size;		/* Size of program and literal pool */
+@@ -53,6 +52,7 @@ struct bpf_jit {
+ 	int excnt;		/* Number of exception table entries */
+ 	int prologue_plt_ret;	/* Return address for prologue hotpatch PLT */
+ 	int prologue_plt;	/* Start of prologue hotpatch PLT */
++	u32 frame_off;		/* Offset of struct bpf_prog from %r15 */
+ };
+ 
+ #define SEEN_MEM	BIT(0)		/* use mem[] for temporary storage */
+@@ -118,8 +118,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ {
+ 	u32 r1 = reg2hex[b1];
+ 
+-	if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
+-		jit->seen_reg[r1] = 1;
++	if (r1 >= 6 && r1 <= 15)
++		jit->seen_regs |= (1 << r1);
+ }
+ 
+ #define REG_SET_SEEN(b1)					\
+@@ -127,8 +127,6 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ 	reg_set_seen(jit, b1);					\
+ })
+ 
+-#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
+-
+ /*
+  * EMIT macros for code generation
+  */
+@@ -400,12 +398,26 @@ static void jit_fill_hole(void *area, unsigned int size)
+ 	memset(area, 0, size);
+ }
+ 
++/*
++ * Caller-allocated part of the frame.
++ * Thanks to packed stack, its otherwise unused initial part can be used for
++ * the BPF stack and for the next frame.
++ */
++struct prog_frame {
++	u64 unused[8];
++	/* BPF stack starts here and grows towards 0 */
++	u32 tail_call_cnt;
++	u32 pad;
++	u64 r6[10];  /* r6 - r15 */
++	u64 backchain;
++} __packed;
++
+ /*
+  * Save registers from "rs" (register start) to "re" (register end) on stack
+  */
+ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
+ {
+-	u32 off = STK_OFF_R6 + (rs - 6) * 8;
++	u32 off = offsetof(struct prog_frame, r6) + (rs - 6) * 8;
+ 
+ 	if (rs == re)
+ 		/* stg %rs,off(%r15) */
+@@ -418,12 +430,9 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
+ /*
+  * Restore registers from "rs" (register start) to "re" (register end) on stack
+  */
+-static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
++static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
+ {
+-	u32 off = STK_OFF_R6 + (rs - 6) * 8;
+-
+-	if (jit->seen & SEEN_STACK)
+-		off += STK_OFF + stack_depth;
++	u32 off = jit->frame_off + offsetof(struct prog_frame, r6) + (rs - 6) * 8;
+ 
+ 	if (rs == re)
+ 		/* lg %rs,off(%r15) */
+@@ -436,12 +445,12 @@ static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
+ /*
+  * Return first seen register (from start)
+  */
+-static int get_start(struct bpf_jit *jit, int start)
++static int get_start(u16 seen_regs, int start)
+ {
+ 	int i;
+ 
+ 	for (i = start; i <= 15; i++) {
+-		if (jit->seen_reg[i])
++		if (seen_regs & (1 << i))
+ 			return i;
+ 	}
+ 	return 0;
+@@ -450,15 +459,15 @@ static int get_start(struct bpf_jit *jit, int start)
+ /*
+  * Return last seen register (from start) (gap >= 2)
+  */
+-static int get_end(struct bpf_jit *jit, int start)
++static int get_end(u16 seen_regs, int start)
+ {
+ 	int i;
+ 
+ 	for (i = start; i < 15; i++) {
+-		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
++		if (!(seen_regs & (3 << i)))
+ 			return i - 1;
+ 	}
+-	return jit->seen_reg[15] ? 15 : 14;
++	return (seen_regs & (1 << 15)) ? 15 : 14;
+ }
+ 
+ #define REGS_SAVE	1
+@@ -467,8 +476,9 @@ static int get_end(struct bpf_jit *jit, int start)
+  * Save and restore clobbered registers (6-15) on stack.
+  * We save/restore registers in chunks with gap >= 2 registers.
+  */
+-static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
++static void save_restore_regs(struct bpf_jit *jit, int op, u16 extra_regs)
+ {
++	u16 seen_regs = jit->seen_regs | extra_regs;
+ 	const int last = 15, save_restore_size = 6;
+ 	int re = 6, rs;
+ 
+@@ -482,14 +492,14 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
+ 	}
+ 
+ 	do {
+-		rs = get_start(jit, re);
++		rs = get_start(seen_regs, re);
+ 		if (!rs)
+ 			break;
+-		re = get_end(jit, rs + 1);
++		re = get_end(seen_regs, rs + 1);
+ 		if (op == REGS_SAVE)
+ 			save_regs(jit, rs, re);
+ 		else
+-			restore_regs(jit, rs, re, stack_depth);
++			restore_regs(jit, rs, re);
+ 		re++;
+ 	} while (re <= last);
+ }
+@@ -554,11 +564,12 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
+  * Emit function prologue
+  *
+  * Save registers and create stack frame if necessary.
+- * See stack frame layout description in "bpf_jit.h"!
++ * Stack frame layout is described by struct prog_frame.
+  */
+-static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+-			     u32 stack_depth)
++static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp)
+ {
++	BUILD_BUG_ON(sizeof(struct prog_frame) != STACK_FRAME_OVERHEAD);
++
+ 	/* No-op for hotpatching */
+ 	/* brcl 0,prologue_plt */
+ 	EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
+@@ -566,8 +577,9 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ 
+ 	if (fp->aux->func_idx == 0) {
+ 		/* Initialize the tail call counter in the main program. */
+-		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
+-		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
++		/* xc tail_call_cnt(4,%r15),tail_call_cnt(%r15) */
++		_EMIT6(0xd703f000 | offsetof(struct prog_frame, tail_call_cnt),
++		       0xf000 | offsetof(struct prog_frame, tail_call_cnt));
+ 	} else {
+ 		/*
+ 		 * Skip the tail call counter initialization in subprograms.
+@@ -579,7 +591,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ 	/* Tail calls have to skip above initialization */
+ 	jit->tail_call_start = jit->prg;
+ 	/* Save registers */
+-	save_restore_regs(jit, REGS_SAVE, stack_depth);
++	save_restore_regs(jit, REGS_SAVE, 0);
+ 	/* Setup literal pool */
+ 	if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
+ 		if (!is_first_pass(jit) &&
+@@ -597,13 +609,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
+ 	if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
+ 		/* lgr %w1,%r15 (backchain) */
+ 		EMIT4(0xb9040000, REG_W1, REG_15);
+-		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
+-		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
+-		/* aghi %r15,-STK_OFF */
+-		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
+-		/* stg %w1,152(%r15) (backchain) */
++		/* la %bfp,unused_end(%r15) (BPF frame pointer) */
++		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15,
++			   offsetofend(struct prog_frame, unused));
++		/* aghi %r15,-frame_off */
++		EMIT4_IMM(0xa70b0000, REG_15, -jit->frame_off);
++		/* stg %w1,backchain(%r15) */
+ 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
+-			      REG_15, 152);
++			      REG_15,
++			      offsetof(struct prog_frame, backchain));
+ 	}
+ }
+ 
+@@ -647,13 +661,13 @@ static void call_r1(struct bpf_jit *jit)
+ /*
+  * Function epilogue
+  */
+-static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
++static void bpf_jit_epilogue(struct bpf_jit *jit)
+ {
+ 	jit->exit_ip = jit->prg;
+ 	/* Load exit code: lgr %r2,%b0 */
+ 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
+ 	/* Restore registers */
+-	save_restore_regs(jit, REGS_RESTORE, stack_depth);
++	save_restore_regs(jit, REGS_RESTORE, 0);
+ 	if (nospec_uses_trampoline()) {
+ 		jit->r14_thunk_ip = jit->prg;
+ 		/* Generate __s390_indirect_jump_r14 thunk */
+@@ -779,7 +793,7 @@ static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
+  * stack space for the large switch statement.
+  */
+ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+-				 int i, bool extra_pass, u32 stack_depth)
++				 int i, bool extra_pass)
+ {
+ 	struct bpf_insn *insn = &fp->insnsi[i];
+ 	u32 dst_reg = insn->dst_reg;
+@@ -1425,17 +1439,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		jit->seen |= SEEN_FUNC;
+ 		/*
+ 		 * Copy the tail call counter to where the callee expects it.
+-		 *
+-		 * Note 1: The callee can increment the tail call counter, but
+-		 * we do not load it back, since the x86 JIT does not do this
+-		 * either.
+-		 *
+-		 * Note 2: We assume that the verifier does not let us call the
+-		 * main program, which clears the tail call counter on entry.
+ 		 */
+-		/* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
+-		_EMIT6(0xd203f000 | STK_OFF_TCCNT,
+-		       0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
++		/* mvc tail_call_cnt(4,%r15),frame_off+tail_call_cnt(%r15) */
++		_EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
++		       0xf000 | (jit->frame_off +
++				 offsetof(struct prog_frame, tail_call_cnt)));
+ 
+ 		/* Sign-extend the kfunc arguments. */
+ 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+@@ -1457,6 +1465,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		call_r1(jit);
+ 		/* lgr %b0,%r2: load return value into %b0 */
+ 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
++
++		/*
++		 * Copy the potentially updated tail call counter back.
++		 */
++
++		if (insn->src_reg == BPF_PSEUDO_CALL)
++			/*
++			 * mvc frame_off+tail_call_cnt(%r15),
++			 *     tail_call_cnt(4,%r15)
++			 */
++			_EMIT6(0xd203f000 | (jit->frame_off +
++					     offsetof(struct prog_frame,
++						      tail_call_cnt)),
++			       0xf000 | offsetof(struct prog_frame,
++						 tail_call_cnt));
++
+ 		break;
+ 	}
+ 	case BPF_JMP | BPF_TAIL_CALL: {
+@@ -1486,10 +1510,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		 *         goto out;
+ 		 */
+ 
+-		if (jit->seen & SEEN_STACK)
+-			off = STK_OFF_TCCNT + STK_OFF + stack_depth;
+-		else
+-			off = STK_OFF_TCCNT;
++		off = jit->frame_off +
++		      offsetof(struct prog_frame, tail_call_cnt);
+ 		/* lhi %w0,1 */
+ 		EMIT4_IMM(0xa7080000, REG_W0, 1);
+ 		/* laal %w1,%w0,off(%r15) */
+@@ -1519,7 +1541,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		/*
+ 		 * Restore registers before calling function
+ 		 */
+-		save_restore_regs(jit, REGS_RESTORE, stack_depth);
++		save_restore_regs(jit, REGS_RESTORE, 0);
+ 
+ 		/*
+ 		 * goto *(prog->bpf_func + tail_call_start);
+@@ -1822,7 +1844,7 @@ static int bpf_set_addr(struct bpf_jit *jit, int i)
+  * Compile eBPF program into s390x code
+  */
+ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
+-			bool extra_pass, u32 stack_depth)
++			bool extra_pass)
+ {
+ 	int i, insn_count, lit32_size, lit64_size;
+ 
+@@ -1830,19 +1852,25 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
+ 	jit->lit64 = jit->lit64_start;
+ 	jit->prg = 0;
+ 	jit->excnt = 0;
++	if (is_first_pass(jit) || (jit->seen & SEEN_STACK))
++		jit->frame_off = sizeof(struct prog_frame) -
++				 offsetofend(struct prog_frame, unused) +
++				 round_up(fp->aux->stack_depth, 8);
++	else
++		jit->frame_off = 0;
+ 
+-	bpf_jit_prologue(jit, fp, stack_depth);
++	bpf_jit_prologue(jit, fp);
+ 	if (bpf_set_addr(jit, 0) < 0)
+ 		return -1;
+ 	for (i = 0; i < fp->len; i += insn_count) {
+-		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
++		insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
+ 		if (insn_count < 0)
+ 			return -1;
+ 		/* Next instruction address */
+ 		if (bpf_set_addr(jit, i + insn_count) < 0)
+ 			return -1;
+ 	}
+-	bpf_jit_epilogue(jit, stack_depth);
++	bpf_jit_epilogue(jit);
+ 
+ 	lit32_size = jit->lit32 - jit->lit32_start;
+ 	lit64_size = jit->lit64 - jit->lit64_start;
+@@ -1902,7 +1930,6 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
+  */
+ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ {
+-	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
+ 	struct bpf_prog *tmp, *orig_fp = fp;
+ 	struct bpf_binary_header *header;
+ 	struct s390_jit_data *jit_data;
+@@ -1955,7 +1982,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ 	 *   - 3:   Calculate program size and addrs array
+ 	 */
+ 	for (pass = 1; pass <= 3; pass++) {
+-		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
++		if (bpf_jit_prog(&jit, fp, extra_pass)) {
+ 			fp = orig_fp;
+ 			goto free_addrs;
+ 		}
+@@ -1969,7 +1996,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ 		goto free_addrs;
+ 	}
+ skip_init_ctx:
+-	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
++	if (bpf_jit_prog(&jit, fp, extra_pass)) {
+ 		bpf_jit_binary_free(header);
+ 		fp = orig_fp;
+ 		goto free_addrs;
+@@ -2285,9 +2312,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	/* stg %r1,backchain_off(%r15) */
+ 	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
+ 		      tjit->backchain_off);
+-	/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
++	/* mvc tccnt_off(4,%r15),stack_size+tail_call_cnt(%r15) */
+ 	_EMIT6(0xd203f000 | tjit->tccnt_off,
+-	       0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
++	       0xf000 | (tjit->stack_size +
++			 offsetof(struct prog_frame, tail_call_cnt)));
+ 	/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
+ 	if (nr_reg_args)
+ 		EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
+@@ -2424,8 +2452,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 				       (nr_stack_args * sizeof(u64) - 1) << 16 |
+ 				       tjit->stack_args_off,
+ 			       0xf000 | tjit->orig_stack_args_off);
+-		/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
+-		_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
++		/* mvc tail_call_cnt(4,%r15),tccnt_off(%r15) */
++		_EMIT6(0xd203f000 | offsetof(struct prog_frame, tail_call_cnt),
++		       0xf000 | tjit->tccnt_off);
+ 		/* lgr %r1,%r8 */
+ 		EMIT4(0xb9040000, REG_1, REG_8);
+ 		/* %r1() */
+@@ -2433,6 +2462,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 		/* stg %r2,retval_off(%r15) */
+ 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
+ 			      tjit->retval_off);
++		/* mvc tccnt_off(%r15),tail_call_cnt(4,%r15) */
++		_EMIT6(0xd203f000 | tjit->tccnt_off,
++		       0xf000 | offsetof(struct prog_frame, tail_call_cnt));
+ 
+ 		im->ip_after_call = jit->prg_buf + jit->prg;
+ 
+@@ -2482,8 +2514,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 	if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
+ 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
+ 			      tjit->retval_off);
+-	/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
+-	_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
++	/* mvc stack_size+tail_call_cnt(4,%r15),tccnt_off(%r15) */
++	_EMIT6(0xd203f000 | (tjit->stack_size +
++			     offsetof(struct prog_frame, tail_call_cnt)),
+ 	       0xf000 | tjit->tccnt_off);
+ 	/* aghi %r15,stack_size */
+ 	EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
+diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
+index 06012e68bdcaec..284a4cafa4324c 100644
+--- a/arch/sparc/kernel/of_device_32.c
++++ b/arch/sparc/kernel/of_device_32.c
+@@ -387,6 +387,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
+ 
+ 	if (of_device_register(op)) {
+ 		printk("%pOF: Could not register of device.\n", dp);
++		put_device(&op->dev);
+ 		kfree(op);
+ 		op = NULL;
+ 	}
+diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
+index d3842821a5a050..d2cd4f42e0cf07 100644
+--- a/arch/sparc/kernel/of_device_64.c
++++ b/arch/sparc/kernel/of_device_64.c
+@@ -680,6 +680,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
+ 
+ 	if (of_device_register(op)) {
+ 		printk("%pOF: Could not register of device.\n", dp);
++		put_device(&op->dev);
+ 		kfree(op);
+ 		op = NULL;
+ 	}
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 8dad5d0995dcce..5bf77c07f6b620 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -133,6 +133,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ 
+ static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+ {
++	unsigned long hugepage_size = _PAGE_SZ4MB_4U;
++
++	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4U;
++
++	switch (shift) {
++	case HPAGE_256MB_SHIFT:
++		hugepage_size = _PAGE_SZ256MB_4U;
++		pte_val(entry) |= _PAGE_PMD_HUGE;
++		break;
++	case HPAGE_SHIFT:
++		pte_val(entry) |= _PAGE_PMD_HUGE;
++		break;
++	case HPAGE_64K_SHIFT:
++		hugepage_size = _PAGE_SZ64K_4U;
++		break;
++	default:
++		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
++	}
++
++	pte_val(entry) = pte_val(entry) | hugepage_size;
+ 	return entry;
+ }
+ 
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 723e48b57bd0f8..425980eacaa841 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -661,6 +661,7 @@
+ #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS	0xc0000300
+ #define MSR_AMD64_PERF_CNTR_GLOBAL_CTL		0xc0000301
+ #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR	0xc0000302
++#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET	0xc0000303
+ 
+ /* AMD Last Branch Record MSRs */
+ #define MSR_AMD64_LBR_SELECT			0xc000010e
+diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
+index 5a4b21389b1d98..d432f3824f0c29 100644
+--- a/arch/x86/kernel/umip.c
++++ b/arch/x86/kernel/umip.c
+@@ -156,15 +156,26 @@ static int identify_insn(struct insn *insn)
+ 	if (!insn->modrm.nbytes)
+ 		return -EINVAL;
+ 
+-	/* All the instructions of interest start with 0x0f. */
+-	if (insn->opcode.bytes[0] != 0xf)
++	/* The instructions of interest have 2-byte opcodes: 0F 00 or 0F 01. */
++	if (insn->opcode.nbytes < 2 || insn->opcode.bytes[0] != 0xf)
+ 		return -EINVAL;
+ 
+ 	if (insn->opcode.bytes[1] == 0x1) {
+ 		switch (X86_MODRM_REG(insn->modrm.value)) {
+ 		case 0:
++			/* The reg form of 0F 01 /0 encodes VMX instructions. */
++			if (X86_MODRM_MOD(insn->modrm.value) == 3)
++				return -EINVAL;
++
+ 			return UMIP_INST_SGDT;
+ 		case 1:
++			/*
++			 * The reg form of 0F 01 /1 encodes MONITOR/MWAIT,
++			 * STAC/CLAC, and ENCLS.
++			 */
++			if (X86_MODRM_MOD(insn->modrm.value) == 3)
++				return -EINVAL;
++
+ 			return UMIP_INST_SIDT;
+ 		case 4:
+ 			return UMIP_INST_SMSW;
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index da2d82e3a8735e..f2cd8cfb0ef557 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -588,6 +588,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		msr_info->data = pmu->global_ctrl;
+ 		break;
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
++	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
+ 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ 		msr_info->data = 0;
+ 		break;
+@@ -649,6 +650,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		if (!msr_info->host_initiated)
+ 			pmu->global_status &= ~data;
+ 		break;
++	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
++		if (!msr_info->host_initiated)
++			pmu->global_status |= data & ~pmu->global_status_mask;
++		break;
+ 	default:
+ 		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
+ 		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index 3fd47de14b38a3..0bad24f763d222 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -117,6 +117,7 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
++	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
+ 		return pmu->version > 1;
+ 	default:
+ 		if (msr > MSR_F15H_PERF_CTR5 &&
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 29566e457ec4b5..0833f2c1a9d68b 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4156,11 +4156,20 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
+ 
+ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+ {
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	/*
++	 * Next RIP must be provided as IRQs are disabled, and accessing guest
++	 * memory to decode the instruction might fault, i.e. might sleep.
++	 */
++	if (!nrips || !svm->vmcb->control.next_rip)
++		return EXIT_FASTPATH_NONE;
++
+ 	if (is_guest_mode(vcpu))
+ 		return EXIT_FASTPATH_NONE;
+ 
+-	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+-	    to_svm(vcpu)->vmcb->control.exit_info_1)
++	if (svm->vmcb->control.exit_code == SVM_EXIT_MSR &&
++	    svm->vmcb->control.exit_info_1)
+ 		return handle_fastpath_set_msr_irqoff(vcpu);
+ 
+ 	return EXIT_FASTPATH_NONE;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 71d60d4e991fd3..a589a5781e9066 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1495,6 +1495,7 @@ static const u32 msrs_to_save_pmu[] = {
+ 	MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+ 	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+ 	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
++	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
+ };
+ 
+ static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
+@@ -7194,6 +7195,7 @@ static void kvm_probe_msr_to_save(u32 msr_index)
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
+ 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
++	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
+ 		if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
+ 			return;
+ 		break;
+diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
+index 178cf96ca10acb..f878bbb8b0ac2f 100644
+--- a/arch/xtensa/platforms/iss/simdisk.c
++++ b/arch/xtensa/platforms/iss/simdisk.c
+@@ -230,10 +230,14 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
+ static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
+ 			size_t count, loff_t *ppos)
+ {
+-	char *tmp = memdup_user_nul(buf, count);
++	char *tmp;
+ 	struct simdisk *dev = pde_data(file_inode(file));
+ 	int err;
+ 
++	if (count == 0 || count > PAGE_SIZE)
++		return -EINVAL;
++
++	tmp = memdup_user_nul(buf, count);
+ 	if (IS_ERR(tmp))
+ 		return PTR_ERR(tmp);
+ 
+diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
+index e6468eab2681e9..7f1a9dba404979 100644
+--- a/block/blk-crypto-fallback.c
++++ b/block/blk-crypto-fallback.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/random.h>
+ #include <linux/scatterlist.h>
++#include <trace/events/block.h>
+ 
+ #include "blk-cgroup.h"
+ #include "blk-crypto-internal.h"
+@@ -229,7 +230,9 @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
+ 			bio->bi_status = BLK_STS_RESOURCE;
+ 			return false;
+ 		}
++
+ 		bio_chain(split_bio, bio);
++		trace_block_split(split_bio, bio->bi_iter.bi_sector);
+ 		submit_bio_noacct(bio);
+ 		*bio_ptr = split_bio;
+ 	}
+diff --git a/crypto/essiv.c b/crypto/essiv.c
+index f7d4ef4837e541..4dbec116ddc3e0 100644
+--- a/crypto/essiv.c
++++ b/crypto/essiv.c
+@@ -186,9 +186,14 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
+ 	const struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
+ 	struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
+ 	struct aead_request *subreq = &rctx->aead_req;
++	int ivsize = crypto_aead_ivsize(tfm);
++	int ssize = req->assoclen - ivsize;
+ 	struct scatterlist *src = req->src;
+ 	int err;
+ 
++	if (ssize < 0)
++		return -EINVAL;
++
+ 	crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
+ 
+ 	/*
+@@ -198,19 +203,12 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
+ 	 */
+ 	rctx->assoc = NULL;
+ 	if (req->src == req->dst || !enc) {
+-		scatterwalk_map_and_copy(req->iv, req->dst,
+-					 req->assoclen - crypto_aead_ivsize(tfm),
+-					 crypto_aead_ivsize(tfm), 1);
++		scatterwalk_map_and_copy(req->iv, req->dst, ssize, ivsize, 1);
+ 	} else {
+ 		u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset;
+-		int ivsize = crypto_aead_ivsize(tfm);
+-		int ssize = req->assoclen - ivsize;
+ 		struct scatterlist *sg;
+ 		int nents;
+ 
+-		if (ssize < 0)
+-			return -EINVAL;
+-
+ 		nents = sg_nents_for_len(req->src, ssize);
+ 		if (nents < 0)
+ 			return -EINVAL;
+diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
+index d50261d05f3a1a..515b20d0b698a4 100644
+--- a/drivers/acpi/acpi_dbg.c
++++ b/drivers/acpi/acpi_dbg.c
+@@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static int acpi_aml_read_user(char __user *buf, int len)
++static ssize_t acpi_aml_read_user(char __user *buf, size_t len)
+ {
+-	int ret;
+ 	struct circ_buf *crc = &acpi_aml_io.out_crc;
+-	int n;
++	ssize_t ret;
++	size_t n;
+ 	char *p;
+ 
+ 	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
+@@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
+ 	/* sync head before removing logs */
+ 	smp_rmb();
+ 	p = &crc->buf[crc->tail];
+-	n = min(len, circ_count_to_end(crc));
++	n = min_t(size_t, len, circ_count_to_end(crc));
+ 	if (copy_to_user(buf, p, n)) {
+ 		ret = -EFAULT;
+ 		goto out;
+@@ -599,8 +599,8 @@ static int acpi_aml_read_user(char __user *buf, int len)
+ static ssize_t acpi_aml_read(struct file *file, char __user *buf,
+ 			     size_t count, loff_t *ppos)
+ {
+-	int ret = 0;
+-	int size = 0;
++	ssize_t ret = 0;
++	ssize_t size = 0;
+ 
+ 	if (!count)
+ 		return 0;
+@@ -639,11 +639,11 @@ static ssize_t acpi_aml_read(struct file *file, char __user *buf,
+ 	return size > 0 ? size : ret;
+ }
+ 
+-static int acpi_aml_write_user(const char __user *buf, int len)
++static ssize_t acpi_aml_write_user(const char __user *buf, size_t len)
+ {
+-	int ret;
+ 	struct circ_buf *crc = &acpi_aml_io.in_crc;
+-	int n;
++	ssize_t ret;
++	size_t n;
+ 	char *p;
+ 
+ 	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
+@@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
+ 	/* sync tail before inserting cmds */
+ 	smp_mb();
+ 	p = &crc->buf[crc->head];
+-	n = min(len, circ_space_to_end(crc));
++	n = min_t(size_t, len, circ_space_to_end(crc));
+ 	if (copy_from_user(p, buf, n)) {
+ 		ret = -EFAULT;
+ 		goto out;
+@@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len)
+ 	ret = n;
+ out:
+ 	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
+-	return n;
++	return ret;
+ }
+ 
+ static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
+ 			      size_t count, loff_t *ppos)
+ {
+-	int ret = 0;
+-	int size = 0;
++	ssize_t ret = 0;
++	ssize_t size = 0;
+ 
+ 	if (!count)
+ 		return 0;
+diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
+index 33c3b16af556b5..ecba82ac7cd5f9 100644
+--- a/drivers/acpi/acpi_tad.c
++++ b/drivers/acpi/acpi_tad.c
+@@ -564,6 +564,9 @@ static int acpi_tad_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(dev);
+ 
++	if (dd->capabilities & ACPI_TAD_RT)
++		sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group);
++
+ 	if (dd->capabilities & ACPI_TAD_DC_WAKE)
+ 		sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
+ 
+diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
+index 989dc01af03fbb..bc205b3309043b 100644
+--- a/drivers/acpi/acpica/evglock.c
++++ b/drivers/acpi/acpica/evglock.c
+@@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
+ 		return_ACPI_STATUS(AE_OK);
+ 	}
+ 
++	if (!acpi_gbl_use_global_lock) {
++		return_ACPI_STATUS(AE_OK);
++	}
++
+ 	/* Attempt installation of the global lock handler */
+ 
+ 	status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index e3cbaf3c3bbc15..a70b6db3bf0596 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -94,7 +94,7 @@ enum {
+ 
+ struct acpi_battery {
+ 	struct mutex lock;
+-	struct mutex sysfs_lock;
++	struct mutex update_lock;
+ 	struct power_supply *bat;
+ 	struct power_supply_desc bat_desc;
+ 	struct acpi_device *device;
+@@ -888,15 +888,12 @@ static int sysfs_add_battery(struct acpi_battery *battery)
+ 
+ static void sysfs_remove_battery(struct acpi_battery *battery)
+ {
+-	mutex_lock(&battery->sysfs_lock);
+-	if (!battery->bat) {
+-		mutex_unlock(&battery->sysfs_lock);
++	if (!battery->bat)
+ 		return;
+-	}
++
+ 	battery_hook_remove_battery(battery);
+ 	power_supply_unregister(battery->bat);
+ 	battery->bat = NULL;
+-	mutex_unlock(&battery->sysfs_lock);
+ }
+ 
+ static void find_battery(const struct dmi_header *dm, void *private)
+@@ -1056,6 +1053,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+ 
+ 	if (!battery)
+ 		return;
++
++	guard(mutex)(&battery->update_lock);
++
+ 	old = battery->bat;
+ 	/*
+ 	 * On Acer Aspire V5-573G notifications are sometimes triggered too
+@@ -1078,21 +1078,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+ }
+ 
+ static int battery_notify(struct notifier_block *nb,
+-			       unsigned long mode, void *_unused)
++			  unsigned long mode, void *_unused)
+ {
+ 	struct acpi_battery *battery = container_of(nb, struct acpi_battery,
+ 						    pm_nb);
+-	int result;
+ 
+-	switch (mode) {
+-	case PM_POST_HIBERNATION:
+-	case PM_POST_SUSPEND:
++	if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) {
++		guard(mutex)(&battery->update_lock);
++
+ 		if (!acpi_battery_present(battery))
+ 			return 0;
+ 
+ 		if (battery->bat) {
+ 			acpi_battery_refresh(battery);
+ 		} else {
++			int result;
++
+ 			result = acpi_battery_get_info(battery);
+ 			if (result)
+ 				return result;
+@@ -1104,7 +1105,6 @@ static int battery_notify(struct notifier_block *nb,
+ 
+ 		acpi_battery_init_alarm(battery);
+ 		acpi_battery_get_state(battery);
+-		break;
+ 	}
+ 
+ 	return 0;
+@@ -1182,6 +1182,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
+ {
+ 	int retry, ret;
+ 
++	guard(mutex)(&battery->update_lock);
++
+ 	for (retry = 5; retry; retry--) {
+ 		ret = acpi_battery_update(battery, false);
+ 		if (!ret)
+@@ -1192,6 +1194,13 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
+ 	return ret;
+ }
+ 
++static void sysfs_battery_cleanup(struct acpi_battery *battery)
++{
++	guard(mutex)(&battery->update_lock);
++
++	sysfs_remove_battery(battery);
++}
++
+ static int acpi_battery_add(struct acpi_device *device)
+ {
+ 	int result = 0;
+@@ -1203,15 +1212,21 @@ static int acpi_battery_add(struct acpi_device *device)
+ 	if (device->dep_unmet)
+ 		return -EPROBE_DEFER;
+ 
+-	battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
++	battery = devm_kzalloc(&device->dev, sizeof(*battery), GFP_KERNEL);
+ 	if (!battery)
+ 		return -ENOMEM;
+ 	battery->device = device;
+ 	strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
+ 	strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
+ 	device->driver_data = battery;
+-	mutex_init(&battery->lock);
+-	mutex_init(&battery->sysfs_lock);
++	result = devm_mutex_init(&device->dev, &battery->lock);
++	if (result)
++		return result;
++
++	result = devm_mutex_init(&device->dev, &battery->update_lock);
++	if (result)
++		return result;
++
+ 	if (acpi_has_method(battery->device->handle, "_BIX"))
+ 		set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
+ 
+@@ -1238,10 +1253,7 @@ static int acpi_battery_add(struct acpi_device *device)
+ 	device_init_wakeup(&device->dev, 0);
+ 	unregister_pm_notifier(&battery->pm_nb);
+ fail:
+-	sysfs_remove_battery(battery);
+-	mutex_destroy(&battery->lock);
+-	mutex_destroy(&battery->sysfs_lock);
+-	kfree(battery);
++	sysfs_battery_cleanup(battery);
+ 
+ 	return result;
+ }
+@@ -1260,11 +1272,10 @@ static void acpi_battery_remove(struct acpi_device *device)
+ 
+ 	device_init_wakeup(&device->dev, 0);
+ 	unregister_pm_notifier(&battery->pm_nb);
+-	sysfs_remove_battery(battery);
+ 
+-	mutex_destroy(&battery->lock);
+-	mutex_destroy(&battery->sysfs_lock);
+-	kfree(battery);
++	guard(mutex)(&battery->update_lock);
++
++	sysfs_remove_battery(battery);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -1281,6 +1292,9 @@ static int acpi_battery_resume(struct device *dev)
+ 		return -EINVAL;
+ 
+ 	battery->update_time = 0;
++
++	guard(mutex)(&battery->update_lock);
++
+ 	acpi_battery_update(battery, true);
+ 	return 0;
+ }
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index dca5682308cb32..5898c3c8c2a7f2 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -74,6 +74,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
+ 					struct fwnode_handle *parent)
+ {
+ 	struct acpi_data_node *dn;
++	acpi_handle scope = NULL;
+ 	bool result;
+ 
+ 	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
+@@ -86,29 +87,35 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
+ 	INIT_LIST_HEAD(&dn->data.properties);
+ 	INIT_LIST_HEAD(&dn->data.subnodes);
+ 
+-	result = acpi_extract_properties(handle, desc, &dn->data);
+-
+-	if (handle) {
+-		acpi_handle scope;
+-		acpi_status status;
++	/*
++	 * The scope for the completion of relative pathname segments and
++	 * subnode object lookup is the one of the namespace node (device)
++	 * containing the object that has returned the package.  That is, it's
++	 * the scope of that object's parent device.
++	 */
++	if (handle)
++		acpi_get_parent(handle, &scope);
+ 
+-		/*
+-		 * The scope for the subnode object lookup is the one of the
+-		 * namespace node (device) containing the object that has
+-		 * returned the package.  That is, it's the scope of that
+-		 * object's parent.
+-		 */
+-		status = acpi_get_parent(handle, &scope);
+-		if (ACPI_SUCCESS(status)
+-		    && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
+-						      &dn->fwnode))
+-			result = true;
+-	} else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
+-						  &dn->fwnode)) {
++	/*
++	 * Extract properties from the _DSD-equivalent package pointed to by
++	 * desc and use scope (if not NULL) for the completion of relative
++	 * pathname segments.
++	 *
++	 * The extracted properties will be held in the new data node dn.
++	 */
++	result = acpi_extract_properties(scope, desc, &dn->data);
++	/*
++	 * Look for subnodes in the _DSD-equivalent package pointed to by desc
++	 * and create child nodes of dn if there are any.
++	 */
++	if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
+ 		result = true;
+-	}
+ 
+ 	if (result) {
++		/*
++		 * This will be NULL if the desc package is embedded in an outer
++		 * _DSD-equivalent package and its scope cannot be determined.
++		 */
+ 		dn->handle = handle;
+ 		dn->data.pointer = desc;
+ 		list_add_tail(&dn->sibling, list);
+@@ -120,35 +127,21 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
+ 	return false;
+ }
+ 
+-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
+-					const union acpi_object *link,
+-					struct list_head *list,
+-					struct fwnode_handle *parent)
+-{
+-	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+-	acpi_status status;
+-
+-	status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+-					    ACPI_TYPE_PACKAGE);
+-	if (ACPI_FAILURE(status))
+-		return false;
+-
+-	if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+-					parent))
+-		return true;
+-
+-	ACPI_FREE(buf.pointer);
+-	return false;
+-}
+-
+ static bool acpi_nondev_subnode_ok(acpi_handle scope,
+ 				   const union acpi_object *link,
+ 				   struct list_head *list,
+ 				   struct fwnode_handle *parent)
+ {
++	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ 	acpi_handle handle;
+ 	acpi_status status;
+ 
++	/*
++	 * If the scope is unknown, the _DSD-equivalent package being parsed
++	 * was embedded in an outer _DSD-equivalent package as a result of
++	 * direct evaluation of an object pointed to by a reference.  In that
++	 * case, using a pathname as the target object pointer is invalid.
++	 */
+ 	if (!scope)
+ 		return false;
+ 
+@@ -157,7 +150,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
+ 	if (ACPI_FAILURE(status))
+ 		return false;
+ 
+-	return acpi_nondev_subnode_data_ok(handle, link, list, parent);
++	status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
++					    ACPI_TYPE_PACKAGE);
++	if (ACPI_FAILURE(status))
++		return false;
++
++	if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
++					parent))
++		return true;
++
++	ACPI_FREE(buf.pointer);
++	return false;
+ }
+ 
+ static bool acpi_add_nondev_subnodes(acpi_handle scope,
+@@ -168,9 +171,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ 	bool ret = false;
+ 	int i;
+ 
++	/*
++	 * Every element in the links package is expected to represent a link
++	 * to a non-device node in a tree containing device-specific data.
++	 */
+ 	for (i = 0; i < links->package.count; i++) {
+ 		union acpi_object *link, *desc;
+-		acpi_handle handle;
+ 		bool result;
+ 
+ 		link = &links->package.elements[i];
+@@ -178,26 +184,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
+ 		if (link->package.count != 2)
+ 			continue;
+ 
+-		/* The first one must be a string. */
++		/* The first one (the key) must be a string. */
+ 		if (link->package.elements[0].type != ACPI_TYPE_STRING)
+ 			continue;
+ 
+-		/* The second one may be a string, a reference or a package. */
++		/* The second one (the target) may be a string or a package. */
+ 		switch (link->package.elements[1].type) {
+ 		case ACPI_TYPE_STRING:
++			/*
++			 * The string is expected to be a full pathname or a
++			 * pathname segment relative to the given scope.  That
++			 * pathname is expected to point to an object returning
++			 * a package that contains _DSD-equivalent information.
++			 */
+ 			result = acpi_nondev_subnode_ok(scope, link, list,
+ 							 parent);
+ 			break;
+-		case ACPI_TYPE_LOCAL_REFERENCE:
+-			handle = link->package.elements[1].reference.handle;
+-			result = acpi_nondev_subnode_data_ok(handle, link, list,
+-							     parent);
+-			break;
+ 		case ACPI_TYPE_PACKAGE:
++			/*
++			 * This happens when a reference is used in AML to
++			 * point to the target.  Since the target is expected
++			 * to be a named object, a reference to it will cause it
++			 * to be avaluated in place and its return package will
++			 * be embedded in the links package at the location of
++			 * the reference.
++			 *
++			 * The target package is expected to contain _DSD-
++			 * equivalent information, but the scope in which it
++			 * is located in the original AML is unknown.  Thus
++			 * it cannot contain pathname segments represented as
++			 * strings because there is no way to build full
++			 * pathnames out of them.
++			 */
++			acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
++					  link->package.elements[0].string.pointer);
+ 			desc = &link->package.elements[1];
+ 			result = acpi_nondev_subnode_extract(desc, NULL, link,
+ 							     list, parent);
+ 			break;
++		case ACPI_TYPE_LOCAL_REFERENCE:
++			/*
++			 * It is not expected to see any local references in
++			 * the links package because referencing a named object
++			 * should cause it to be evaluated in place.
++			 */
++			acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
++					 link->package.elements[0].string.pointer);
++			fallthrough;
+ 		default:
+ 			result = false;
+ 			break;
+@@ -357,6 +390,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
+ 	struct acpi_data_node *dn;
+ 
+ 	list_for_each_entry(dn, &data->subnodes, sibling) {
++		if (!dn->handle)
++			continue;
++
+ 		acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
+ 
+ 		acpi_untie_nondev_subnodes(&dn->data);
+@@ -371,6 +407,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
+ 		acpi_status status;
+ 		bool ret;
+ 
++		if (!dn->handle)
++			continue;
++
+ 		status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
+ 		if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ 			acpi_handle_err(dn->handle, "Can't tag data node\n");
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index c48f4d9f2c690b..558cfb3a06c5ea 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -387,17 +387,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ {
+ 	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+-	size_t tr_len, read_offset, write_offset;
++	size_t tr_len, read_offset;
+ 	struct mhi_ep_buf_info buf_info = {};
+ 	u32 len = MHI_EP_DEFAULT_MTU;
+ 	struct mhi_ring_element *el;
+-	bool tr_done = false;
+ 	void *buf_addr;
+-	u32 buf_left;
+ 	int ret;
+ 
+-	buf_left = len;
+-
+ 	do {
+ 		/* Don't process the transfer ring if the channel is not in RUNNING state */
+ 		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+@@ -410,24 +406,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ 		/* Check if there is data pending to be read from previous read operation */
+ 		if (mhi_chan->tre_bytes_left) {
+ 			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
+-			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
++			tr_len = min(len, mhi_chan->tre_bytes_left);
+ 		} else {
+ 			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
+ 			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
+ 			mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+ 
+-			tr_len = min(buf_left, mhi_chan->tre_size);
++			tr_len = min(len, mhi_chan->tre_size);
+ 		}
+ 
+ 		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+-		write_offset = len - buf_left;
+ 
+ 		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
+ 		if (!buf_addr)
+ 			return -ENOMEM;
+ 
+ 		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
+-		buf_info.dev_addr = buf_addr + write_offset;
++		buf_info.dev_addr = buf_addr;
+ 		buf_info.size = tr_len;
+ 		buf_info.cb = mhi_ep_read_completion;
+ 		buf_info.cb_buf = buf_addr;
+@@ -443,16 +438,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ 			goto err_free_buf_addr;
+ 		}
+ 
+-		buf_left -= tr_len;
+ 		mhi_chan->tre_bytes_left -= tr_len;
+ 
+-		if (!mhi_chan->tre_bytes_left) {
+-			if (MHI_TRE_DATA_GET_IEOT(el))
+-				tr_done = true;
+-
++		if (!mhi_chan->tre_bytes_left)
+ 			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
+-		}
+-	} while (buf_left && !tr_done);
++	/* Read until the ring becomes empty */
++	} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+ 
+ 	return 0;
+ 
+@@ -486,15 +477,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
+ 		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ 	} else {
+ 		/* UL channel */
+-		do {
+-			ret = mhi_ep_read_channel(mhi_cntrl, ring);
+-			if (ret < 0) {
+-				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+-				return ret;
+-			}
+-
+-			/* Read until the ring becomes empty */
+-		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
++		ret = mhi_ep_read_channel(mhi_cntrl, ring);
++		if (ret < 0) {
++			dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
++			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
+index cfd17c02fe20ef..4af748ff509855 100644
+--- a/drivers/bus/mhi/host/init.c
++++ b/drivers/bus/mhi/host/init.c
+@@ -164,7 +164,6 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+ {
+ 	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
+ 	int i, ret;
+ 
+@@ -191,7 +190,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+ 			continue;
+ 
+ 		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
+-			dev_err(dev, "irq %d not available for event ring\n",
++			dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n",
+ 				mhi_event->irq);
+ 			ret = -EINVAL;
+ 			goto error_request;
+@@ -202,7 +201,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+ 				  irq_flags,
+ 				  "mhi", mhi_event);
+ 		if (ret) {
+-			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
++			dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n",
+ 				mhi_cntrl->irq[mhi_event->irq], i);
+ 			goto error_request;
+ 		}
+diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
+index ecfcb50302f6ce..efda90dcf5b3d0 100644
+--- a/drivers/char/ipmi/ipmi_kcs_sm.c
++++ b/drivers/char/ipmi/ipmi_kcs_sm.c
+@@ -122,10 +122,10 @@ struct si_sm_data {
+ 	unsigned long  error0_timeout;
+ };
+ 
+-static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
+-				  struct si_sm_io *io, enum kcs_states state)
++static unsigned int init_kcs_data(struct si_sm_data *kcs,
++				  struct si_sm_io *io)
+ {
+-	kcs->state = state;
++	kcs->state = KCS_IDLE;
+ 	kcs->io = io;
+ 	kcs->write_pos = 0;
+ 	kcs->write_count = 0;
+@@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
+ 	return 2;
+ }
+ 
+-static unsigned int init_kcs_data(struct si_sm_data *kcs,
+-				  struct si_sm_io *io)
+-{
+-	return init_kcs_data_with_state(kcs, io, KCS_IDLE);
+-}
+-
+ static inline unsigned char read_status(struct si_sm_data *kcs)
+ {
+ 	return kcs->io->inputb(kcs->io, 1);
+@@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
+ 	if (size > MAX_KCS_WRITE_SIZE)
+ 		return IPMI_REQ_LEN_EXCEEDED_ERR;
+ 
+-	if (kcs->state != KCS_IDLE) {
++	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
+ 		dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
+ 		return IPMI_NOT_IN_MY_STATE_ERR;
+ 	}
+@@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
+ 	}
+ 
+ 	if (kcs->state == KCS_HOSED) {
+-		init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
++		init_kcs_data(kcs, kcs->io);
+ 		return SI_SM_HOSED;
+ 	}
+ 
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 96f175bd6d9fb7..b7d8bf202ed2d0 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -39,7 +39,9 @@
+ 
+ #define IPMI_DRIVER_VERSION "39.2"
+ 
+-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
++static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
++static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
++				   struct ipmi_user *user);
+ static int ipmi_init_msghandler(void);
+ static void smi_recv_tasklet(struct tasklet_struct *t);
+ static void handle_new_recv_msgs(struct ipmi_smi *intf);
+@@ -939,13 +941,11 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
+ 		 * risk.  At this moment, simply skip it in that case.
+ 		 */
+ 		ipmi_free_recv_msg(msg);
+-		atomic_dec(&msg->user->nr_msgs);
+ 	} else {
+ 		int index;
+ 		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
+ 
+ 		if (user) {
+-			atomic_dec(&user->nr_msgs);
+ 			user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ 			release_ipmi_user(user, index);
+ 		} else {
+@@ -1634,8 +1634,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
+ 		spin_unlock_irqrestore(&intf->events_lock, flags);
+ 
+ 		list_for_each_entry_safe(msg, msg2, &msgs, link) {
+-			msg->user = user;
+-			kref_get(&user->refcount);
++			ipmi_set_recv_msg_user(msg, user);
+ 			deliver_local_response(intf, msg);
+ 		}
+ 
+@@ -2309,22 +2308,18 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 	struct ipmi_recv_msg *recv_msg;
+ 	int rv = 0;
+ 
+-	if (user) {
+-		if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+-			/* Decrement will happen at the end of the routine. */
+-			rv = -EBUSY;
+-			goto out;
+-		}
+-	}
+-
+-	if (supplied_recv)
++	if (supplied_recv) {
+ 		recv_msg = supplied_recv;
+-	else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (recv_msg == NULL) {
+-			rv = -ENOMEM;
+-			goto out;
++		recv_msg->user = user;
++		if (user) {
++			atomic_inc(&user->nr_msgs);
++			/* The put happens when the message is freed. */
++			kref_get(&user->refcount);
+ 		}
++	} else {
++		recv_msg = ipmi_alloc_recv_msg(user);
++		if (IS_ERR(recv_msg))
++			return PTR_ERR(recv_msg);
+ 	}
+ 	recv_msg->user_msg_data = user_msg_data;
+ 
+@@ -2335,8 +2330,7 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 		if (smi_msg == NULL) {
+ 			if (!supplied_recv)
+ 				ipmi_free_recv_msg(recv_msg);
+-			rv = -ENOMEM;
+-			goto out;
++			return -ENOMEM;
+ 		}
+ 	}
+ 
+@@ -2346,10 +2340,6 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 		goto out_err;
+ 	}
+ 
+-	recv_msg->user = user;
+-	if (user)
+-		/* The put happens when the message is freed. */
+-		kref_get(&user->refcount);
+ 	recv_msg->msgid = msgid;
+ 	/*
+ 	 * Store the message to send in the receive message so timeout
+@@ -2378,8 +2368,10 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 
+ 	if (rv) {
+ out_err:
+-		ipmi_free_smi_msg(smi_msg);
+-		ipmi_free_recv_msg(recv_msg);
++		if (!supplied_smi)
++			ipmi_free_smi_msg(smi_msg);
++		if (!supplied_recv)
++			ipmi_free_recv_msg(recv_msg);
+ 	} else {
+ 		dev_dbg(intf->si_dev, "Send: %*ph\n",
+ 			smi_msg->data_size, smi_msg->data);
+@@ -2388,9 +2380,6 @@ static int i_ipmi_request(struct ipmi_user     *user,
+ 	}
+ 	rcu_read_unlock();
+ 
+-out:
+-	if (rv && user)
+-		atomic_dec(&user->nr_msgs);
+ 	return rv;
+ }
+ 
+@@ -3883,7 +3872,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+ 	unsigned char            chan;
+ 	struct ipmi_user         *user = NULL;
+ 	struct ipmi_ipmb_addr    *ipmb_addr;
+-	struct ipmi_recv_msg     *recv_msg;
++	struct ipmi_recv_msg     *recv_msg = NULL;
+ 
+ 	if (msg->rsp_size < 10) {
+ 		/* Message not big enough, just ignore it. */
+@@ -3904,9 +3893,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+ 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ 	if (rcvr) {
+ 		user = rcvr->user;
+-		kref_get(&user->refcount);
+-	} else
+-		user = NULL;
++		recv_msg = ipmi_alloc_recv_msg(user);
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (user == NULL) {
+@@ -3941,47 +3929,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
+ 			rv = -1;
+ 		}
+ 		rcu_read_unlock();
+-	} else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
+-			/*
+-			 * We couldn't allocate memory for the
+-			 * message, so requeue it for handling
+-			 * later.
+-			 */
+-			rv = 1;
+-			kref_put(&user->refcount, free_user);
+-		} else {
+-			/* Extract the source address from the data. */
+-			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+-			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+-			ipmb_addr->slave_addr = msg->rsp[6];
+-			ipmb_addr->lun = msg->rsp[7] & 3;
+-			ipmb_addr->channel = msg->rsp[3] & 0xf;
++	} else if (!IS_ERR(recv_msg)) {
++		/* Extract the source address from the data. */
++		ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
++		ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
++		ipmb_addr->slave_addr = msg->rsp[6];
++		ipmb_addr->lun = msg->rsp[7] & 3;
++		ipmb_addr->channel = msg->rsp[3] & 0xf;
+ 
+-			/*
+-			 * Extract the rest of the message information
+-			 * from the IPMB header.
+-			 */
+-			recv_msg->user = user;
+-			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-			recv_msg->msgid = msg->rsp[7] >> 2;
+-			recv_msg->msg.netfn = msg->rsp[4] >> 2;
+-			recv_msg->msg.cmd = msg->rsp[8];
+-			recv_msg->msg.data = recv_msg->msg_data;
++		/*
++		 * Extract the rest of the message information
++		 * from the IPMB header.
++		 */
++		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++		recv_msg->msgid = msg->rsp[7] >> 2;
++		recv_msg->msg.netfn = msg->rsp[4] >> 2;
++		recv_msg->msg.cmd = msg->rsp[8];
++		recv_msg->msg.data = recv_msg->msg_data;
+ 
+-			/*
+-			 * We chop off 10, not 9 bytes because the checksum
+-			 * at the end also needs to be removed.
+-			 */
+-			recv_msg->msg.data_len = msg->rsp_size - 10;
+-			memcpy(recv_msg->msg_data, &msg->rsp[9],
+-			       msg->rsp_size - 10);
+-			if (deliver_response(intf, recv_msg))
+-				ipmi_inc_stat(intf, unhandled_commands);
+-			else
+-				ipmi_inc_stat(intf, handled_commands);
+-		}
++		/*
++		 * We chop off 10, not 9 bytes because the checksum
++		 * at the end also needs to be removed.
++		 */
++		recv_msg->msg.data_len = msg->rsp_size - 10;
++		memcpy(recv_msg->msg_data, &msg->rsp[9],
++		       msg->rsp_size - 10);
++		if (deliver_response(intf, recv_msg))
++			ipmi_inc_stat(intf, unhandled_commands);
++		else
++			ipmi_inc_stat(intf, handled_commands);
++	} else {
++		/*
++		 * We couldn't allocate memory for the message, so
++		 * requeue it for handling later.
++		 */
++		rv = 1;
+ 	}
+ 
+ 	return rv;
+@@ -3994,7 +3976,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ 	int                      rv = 0;
+ 	struct ipmi_user         *user = NULL;
+ 	struct ipmi_ipmb_direct_addr *daddr;
+-	struct ipmi_recv_msg     *recv_msg;
++	struct ipmi_recv_msg     *recv_msg = NULL;
+ 	unsigned char netfn = msg->rsp[0] >> 2;
+ 	unsigned char cmd = msg->rsp[3];
+ 
+@@ -4003,9 +3985,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ 	rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+ 	if (rcvr) {
+ 		user = rcvr->user;
+-		kref_get(&user->refcount);
+-	} else
+-		user = NULL;
++		recv_msg = ipmi_alloc_recv_msg(user);
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (user == NULL) {
+@@ -4032,44 +4013,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ 			rv = -1;
+ 		}
+ 		rcu_read_unlock();
+-	} else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
+-			/*
+-			 * We couldn't allocate memory for the
+-			 * message, so requeue it for handling
+-			 * later.
+-			 */
+-			rv = 1;
+-			kref_put(&user->refcount, free_user);
+-		} else {
+-			/* Extract the source address from the data. */
+-			daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+-			daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+-			daddr->channel = 0;
+-			daddr->slave_addr = msg->rsp[1];
+-			daddr->rs_lun = msg->rsp[0] & 3;
+-			daddr->rq_lun = msg->rsp[2] & 3;
++	} else if (!IS_ERR(recv_msg)) {
++		/* Extract the source address from the data. */
++		daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
++		daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
++		daddr->channel = 0;
++		daddr->slave_addr = msg->rsp[1];
++		daddr->rs_lun = msg->rsp[0] & 3;
++		daddr->rq_lun = msg->rsp[2] & 3;
+ 
+-			/*
+-			 * Extract the rest of the message information
+-			 * from the IPMB header.
+-			 */
+-			recv_msg->user = user;
+-			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-			recv_msg->msgid = (msg->rsp[2] >> 2);
+-			recv_msg->msg.netfn = msg->rsp[0] >> 2;
+-			recv_msg->msg.cmd = msg->rsp[3];
+-			recv_msg->msg.data = recv_msg->msg_data;
+-
+-			recv_msg->msg.data_len = msg->rsp_size - 4;
+-			memcpy(recv_msg->msg_data, msg->rsp + 4,
+-			       msg->rsp_size - 4);
+-			if (deliver_response(intf, recv_msg))
+-				ipmi_inc_stat(intf, unhandled_commands);
+-			else
+-				ipmi_inc_stat(intf, handled_commands);
+-		}
++		/*
++		 * Extract the rest of the message information
++		 * from the IPMB header.
++		 */
++		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++		recv_msg->msgid = (msg->rsp[2] >> 2);
++		recv_msg->msg.netfn = msg->rsp[0] >> 2;
++		recv_msg->msg.cmd = msg->rsp[3];
++		recv_msg->msg.data = recv_msg->msg_data;
++
++		recv_msg->msg.data_len = msg->rsp_size - 4;
++		memcpy(recv_msg->msg_data, msg->rsp + 4,
++		       msg->rsp_size - 4);
++		if (deliver_response(intf, recv_msg))
++			ipmi_inc_stat(intf, unhandled_commands);
++		else
++			ipmi_inc_stat(intf, handled_commands);
++	} else {
++		/*
++		 * We couldn't allocate memory for the message, so
++		 * requeue it for handling later.
++		 */
++		rv = 1;
+ 	}
+ 
+ 	return rv;
+@@ -4183,7 +4158,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+ 	unsigned char            chan;
+ 	struct ipmi_user         *user = NULL;
+ 	struct ipmi_lan_addr     *lan_addr;
+-	struct ipmi_recv_msg     *recv_msg;
++	struct ipmi_recv_msg     *recv_msg = NULL;
+ 
+ 	if (msg->rsp_size < 12) {
+ 		/* Message not big enough, just ignore it. */
+@@ -4204,9 +4179,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+ 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ 	if (rcvr) {
+ 		user = rcvr->user;
+-		kref_get(&user->refcount);
+-	} else
+-		user = NULL;
++		recv_msg = ipmi_alloc_recv_msg(user);
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (user == NULL) {
+@@ -4218,49 +4192,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
+ 		 * them to be freed.
+ 		 */
+ 		rv = 0;
+-	} else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
+-			/*
+-			 * We couldn't allocate memory for the
+-			 * message, so requeue it for handling later.
+-			 */
+-			rv = 1;
+-			kref_put(&user->refcount, free_user);
+-		} else {
+-			/* Extract the source address from the data. */
+-			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+-			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+-			lan_addr->session_handle = msg->rsp[4];
+-			lan_addr->remote_SWID = msg->rsp[8];
+-			lan_addr->local_SWID = msg->rsp[5];
+-			lan_addr->lun = msg->rsp[9] & 3;
+-			lan_addr->channel = msg->rsp[3] & 0xf;
+-			lan_addr->privilege = msg->rsp[3] >> 4;
++	} else if (!IS_ERR(recv_msg)) {
++		/* Extract the source address from the data. */
++		lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
++		lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
++		lan_addr->session_handle = msg->rsp[4];
++		lan_addr->remote_SWID = msg->rsp[8];
++		lan_addr->local_SWID = msg->rsp[5];
++		lan_addr->lun = msg->rsp[9] & 3;
++		lan_addr->channel = msg->rsp[3] & 0xf;
++		lan_addr->privilege = msg->rsp[3] >> 4;
+ 
+-			/*
+-			 * Extract the rest of the message information
+-			 * from the IPMB header.
+-			 */
+-			recv_msg->user = user;
+-			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-			recv_msg->msgid = msg->rsp[9] >> 2;
+-			recv_msg->msg.netfn = msg->rsp[6] >> 2;
+-			recv_msg->msg.cmd = msg->rsp[10];
+-			recv_msg->msg.data = recv_msg->msg_data;
++		/*
++		 * Extract the rest of the message information
++		 * from the IPMB header.
++		 */
++		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++		recv_msg->msgid = msg->rsp[9] >> 2;
++		recv_msg->msg.netfn = msg->rsp[6] >> 2;
++		recv_msg->msg.cmd = msg->rsp[10];
++		recv_msg->msg.data = recv_msg->msg_data;
+ 
+-			/*
+-			 * We chop off 12, not 11 bytes because the checksum
+-			 * at the end also needs to be removed.
+-			 */
+-			recv_msg->msg.data_len = msg->rsp_size - 12;
+-			memcpy(recv_msg->msg_data, &msg->rsp[11],
+-			       msg->rsp_size - 12);
+-			if (deliver_response(intf, recv_msg))
+-				ipmi_inc_stat(intf, unhandled_commands);
+-			else
+-				ipmi_inc_stat(intf, handled_commands);
+-		}
++		/*
++		 * We chop off 12, not 11 bytes because the checksum
++		 * at the end also needs to be removed.
++		 */
++		recv_msg->msg.data_len = msg->rsp_size - 12;
++		memcpy(recv_msg->msg_data, &msg->rsp[11],
++		       msg->rsp_size - 12);
++		if (deliver_response(intf, recv_msg))
++			ipmi_inc_stat(intf, unhandled_commands);
++		else
++			ipmi_inc_stat(intf, handled_commands);
++	} else {
++		/*
++		 * We couldn't allocate memory for the message, so
++		 * requeue it for handling later.
++		 */
++		rv = 1;
+ 	}
+ 
+ 	return rv;
+@@ -4282,7 +4251,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+ 	unsigned char         chan;
+ 	struct ipmi_user *user = NULL;
+ 	struct ipmi_system_interface_addr *smi_addr;
+-	struct ipmi_recv_msg  *recv_msg;
++	struct ipmi_recv_msg  *recv_msg = NULL;
+ 
+ 	/*
+ 	 * We expect the OEM SW to perform error checking
+@@ -4311,9 +4280,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+ 	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ 	if (rcvr) {
+ 		user = rcvr->user;
+-		kref_get(&user->refcount);
+-	} else
+-		user = NULL;
++		recv_msg = ipmi_alloc_recv_msg(user);
++	}
+ 	rcu_read_unlock();
+ 
+ 	if (user == NULL) {
+@@ -4326,48 +4294,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
+ 		 */
+ 
+ 		rv = 0;
+-	} else {
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
+-			/*
+-			 * We couldn't allocate memory for the
+-			 * message, so requeue it for handling
+-			 * later.
+-			 */
+-			rv = 1;
+-			kref_put(&user->refcount, free_user);
+-		} else {
+-			/*
+-			 * OEM Messages are expected to be delivered via
+-			 * the system interface to SMS software.  We might
+-			 * need to visit this again depending on OEM
+-			 * requirements
+-			 */
+-			smi_addr = ((struct ipmi_system_interface_addr *)
+-				    &recv_msg->addr);
+-			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+-			smi_addr->channel = IPMI_BMC_CHANNEL;
+-			smi_addr->lun = msg->rsp[0] & 3;
+-
+-			recv_msg->user = user;
+-			recv_msg->user_msg_data = NULL;
+-			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+-			recv_msg->msg.netfn = msg->rsp[0] >> 2;
+-			recv_msg->msg.cmd = msg->rsp[1];
+-			recv_msg->msg.data = recv_msg->msg_data;
++	} else if (!IS_ERR(recv_msg)) {
++		/*
++		 * OEM Messages are expected to be delivered via
++		 * the system interface to SMS software.  We might
++		 * need to visit this again depending on OEM
++		 * requirements
++		 */
++		smi_addr = ((struct ipmi_system_interface_addr *)
++			    &recv_msg->addr);
++		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
++		smi_addr->channel = IPMI_BMC_CHANNEL;
++		smi_addr->lun = msg->rsp[0] & 3;
++
++		recv_msg->user_msg_data = NULL;
++		recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
++		recv_msg->msg.netfn = msg->rsp[0] >> 2;
++		recv_msg->msg.cmd = msg->rsp[1];
++		recv_msg->msg.data = recv_msg->msg_data;
+ 
+-			/*
+-			 * The message starts at byte 4 which follows the
+-			 * Channel Byte in the "GET MESSAGE" command
+-			 */
+-			recv_msg->msg.data_len = msg->rsp_size - 4;
+-			memcpy(recv_msg->msg_data, &msg->rsp[4],
+-			       msg->rsp_size - 4);
+-			if (deliver_response(intf, recv_msg))
+-				ipmi_inc_stat(intf, unhandled_commands);
+-			else
+-				ipmi_inc_stat(intf, handled_commands);
+-		}
++		/*
++		 * The message starts at byte 4 which follows the
++		 * Channel Byte in the "GET MESSAGE" command
++		 */
++		recv_msg->msg.data_len = msg->rsp_size - 4;
++		memcpy(recv_msg->msg_data, &msg->rsp[4],
++		       msg->rsp_size - 4);
++		if (deliver_response(intf, recv_msg))
++			ipmi_inc_stat(intf, unhandled_commands);
++		else
++			ipmi_inc_stat(intf, handled_commands);
++	} else {
++		/*
++		 * We couldn't allocate memory for the message, so
++		 * requeue it for handling later.
++		 */
++		rv = 1;
+ 	}
+ 
+ 	return rv;
+@@ -4426,8 +4388,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
+ 		if (!user->gets_events)
+ 			continue;
+ 
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
++		recv_msg = ipmi_alloc_recv_msg(user);
++		if (IS_ERR(recv_msg)) {
+ 			rcu_read_unlock();
+ 			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+ 						 link) {
+@@ -4446,8 +4408,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
+ 		deliver_count++;
+ 
+ 		copy_event_into_recv_msg(recv_msg, msg);
+-		recv_msg->user = user;
+-		kref_get(&user->refcount);
+ 		list_add_tail(&recv_msg->link, &msgs);
+ 	}
+ 	srcu_read_unlock(&intf->users_srcu, index);
+@@ -4463,8 +4423,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
+ 		 * No one to receive the message, put it in queue if there's
+ 		 * not already too many things in the queue.
+ 		 */
+-		recv_msg = ipmi_alloc_recv_msg();
+-		if (!recv_msg) {
++		recv_msg = ipmi_alloc_recv_msg(NULL);
++		if (IS_ERR(recv_msg)) {
+ 			/*
+ 			 * We couldn't allocate memory for the
+ 			 * message, so requeue it for handling
+@@ -5156,27 +5116,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
+ 		kfree(msg);
+ }
+ 
+-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
++static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
+ {
+ 	struct ipmi_recv_msg *rv;
+ 
++	if (user) {
++		if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
++			atomic_dec(&user->nr_msgs);
++			return ERR_PTR(-EBUSY);
++		}
++	}
++
+ 	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+-	if (rv) {
+-		rv->user = NULL;
+-		rv->done = free_recv_msg;
+-		atomic_inc(&recv_msg_inuse_count);
++	if (!rv) {
++		if (user)
++			atomic_dec(&user->nr_msgs);
++		return ERR_PTR(-ENOMEM);
+ 	}
++
++	rv->user = user;
++	rv->done = free_recv_msg;
++	if (user)
++		kref_get(&user->refcount);
++	atomic_inc(&recv_msg_inuse_count);
+ 	return rv;
+ }
+ 
+ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+ {
+-	if (msg->user && !oops_in_progress)
++	if (msg->user && !oops_in_progress) {
++		atomic_dec(&msg->user->nr_msgs);
+ 		kref_put(&msg->user->refcount, free_user);
++	}
+ 	msg->done(msg);
+ }
+ EXPORT_SYMBOL(ipmi_free_recv_msg);
+ 
++static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
++				   struct ipmi_user *user)
++{
++	WARN_ON_ONCE(msg->user); /* User should not be set. */
++	msg->user = user;
++	atomic_inc(&user->nr_msgs);
++	kref_get(&user->refcount);
++}
++
+ static atomic_t panic_done_count = ATOMIC_INIT(0);
+ 
+ static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index c71e61ccb95a2b..5e6ee5b82b8ff4 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -977,8 +977,8 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ 	 * will call disable_irq which undoes all of the above.
+ 	 */
+ 	if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+-		tpm_tis_write8(priv, original_int_vec,
+-			       TPM_INT_VECTOR(priv->locality));
++		tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
++			       original_int_vec);
+ 		rc = -1;
+ 	}
+ 
+diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
+index c173a44c800aa8..629f050a855aae 100644
+--- a/drivers/clk/at91/clk-peripheral.c
++++ b/drivers/clk/at91/clk-peripheral.c
+@@ -279,8 +279,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
+ 	long best_diff = LONG_MIN;
+ 	u32 shift;
+ 
+-	if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
+-		return parent_rate;
++	if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
++		req->rate = parent_rate;
++
++		return 0;
++	}
+ 
+ 	/* Fist step: check the available dividers. */
+ 	for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
+diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+index dfba6eb61ccfee..4ecdf9ae024439 100644
+--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
++++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+@@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = {
+ 	GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
+ 	GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
+ 	/* INFRA_AO1 */
+-	GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
++	GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0),
+ 	GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
+ 	GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
+ 	GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
+diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
+index c93bc7f926e5d4..359f92df826b5e 100644
+--- a/drivers/clk/mediatek/clk-mux.c
++++ b/drivers/clk/mediatek/clk-mux.c
+@@ -132,9 +132,7 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
+ static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
+ 				      struct clk_rate_request *req)
+ {
+-	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+-
+-	return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
++	return clk_mux_determine_rate_flags(hw, req, 0);
+ }
+ 
+ const struct clk_ops mtk_mux_clr_set_upd_ops = {
+diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
+index 69ebf65081b81f..bbd7d64038fab5 100644
+--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
++++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
+@@ -371,23 +371,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
+ 	return 0;
+ }
+ 
+-static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate,
+-				    unsigned long *prate)
++static int lpc18xx_pll0_determine_rate(struct clk_hw *hw,
++				       struct clk_rate_request *req)
+ {
+ 	unsigned long m;
+ 
+-	if (*prate < rate) {
++	if (req->best_parent_rate < req->rate) {
+ 		pr_warn("%s: pll dividers not supported\n", __func__);
+ 		return -EINVAL;
+ 	}
+ 
+-	m = DIV_ROUND_UP_ULL(*prate, rate * 2);
+-	if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
+-		pr_warn("%s: unable to support rate %lu\n", __func__, rate);
++	m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2);
++	if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
++		pr_warn("%s: unable to support rate %lu\n", __func__, req->rate);
+ 		return -EINVAL;
+ 	}
+ 
+-	return 2 * *prate * m;
++	req->rate = 2 * req->best_parent_rate * m;
++
++	return 0;
+ }
+ 
+ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
+@@ -403,7 +405,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	}
+ 
+ 	m = DIV_ROUND_UP_ULL(parent_rate, rate * 2);
+-	if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
++	if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+ 		pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+ 		return -EINVAL;
+ 	}
+@@ -444,7 +446,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ static const struct clk_ops lpc18xx_pll0_ops = {
+ 	.recalc_rate	= lpc18xx_pll0_recalc_rate,
+-	.round_rate	= lpc18xx_pll0_round_rate,
++	.determine_rate = lpc18xx_pll0_determine_rate,
+ 	.set_rate	= lpc18xx_pll0_set_rate,
+ };
+ 
+diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
+index 7bfba0afd77831..4ec408c3a26aa4 100644
+--- a/drivers/clk/tegra/clk-bpmp.c
++++ b/drivers/clk/tegra/clk-bpmp.c
+@@ -635,7 +635,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp,
+ 
+ 	bpmp->num_clocks = count;
+ 
+-	bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL);
++	bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL);
+ 	if (!bpmp->clocks)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
+index e95fdc49c2269c..bbceb0289d457a 100644
+--- a/drivers/clocksource/clps711x-timer.c
++++ b/drivers/clocksource/clps711x-timer.c
+@@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np)
+ 	unsigned int irq = irq_of_parse_and_map(np, 0);
+ 	struct clk *clock = of_clk_get(np, 0);
+ 	void __iomem *base = of_iomap(np, 0);
++	int ret = 0;
+ 
+ 	if (!base)
+ 		return -ENOMEM;
+-	if (!irq)
+-		return -EINVAL;
+-	if (IS_ERR(clock))
+-		return PTR_ERR(clock);
++	if (!irq) {
++		ret = -EINVAL;
++		goto unmap_io;
++	}
++	if (IS_ERR(clock)) {
++		ret = PTR_ERR(clock);
++		goto unmap_io;
++	}
+ 
+ 	switch (of_alias_get_id(np, "timer")) {
+ 	case CLPS711X_CLKSRC_CLOCKSOURCE:
+ 		clps711x_clksrc_init(clock, base);
+ 		break;
+ 	case CLPS711X_CLKSRC_CLOCKEVENT:
+-		return _clps711x_clkevt_init(clock, base, irq);
++		ret = _clps711x_clkevt_init(clock, base, irq);
++		break;
+ 	default:
+-		return -EINVAL;
++		ret = -EINVAL;
++		break;
+ 	}
+ 
+-	return 0;
++unmap_io:
++	iounmap(base);
++	return ret;
+ }
+ TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index ed782c0b48af25..60b4e3b608c006 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1341,10 +1341,10 @@ static void update_qos_request(enum freq_qos_req_type type)
+ 			continue;
+ 
+ 		req = policy->driver_data;
+-		cpufreq_cpu_put(policy);
+-
+-		if (!req)
++		if (!req) {
++			cpufreq_cpu_put(policy);
+ 			continue;
++		}
+ 
+ 		if (hwp_active)
+ 			intel_pstate_get_hwp_cap(cpu);
+@@ -1360,6 +1360,8 @@ static void update_qos_request(enum freq_qos_req_type type)
+ 
+ 		if (freq_qos_update_request(req, freq) < 0)
+ 			pr_warn("Failed to update freq constraint: CPU%d\n", i);
++
++		cpufreq_cpu_put(policy);
+ 	}
+ }
+ 
+diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
+index 7b8fcfa55038bc..39186008afbfdf 100644
+--- a/drivers/cpufreq/tegra186-cpufreq.c
++++ b/drivers/cpufreq/tegra186-cpufreq.c
+@@ -86,10 +86,14 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
+ {
+ 	struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ 	struct cpufreq_frequency_table *tbl = policy->freq_table + index;
+-	unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
++	unsigned int edvd_offset;
+ 	u32 edvd_val = tbl->driver_data;
++	u32 cpu;
+ 
+-	writel(edvd_val, data->regs + edvd_offset);
++	for_each_cpu(cpu, policy->cpus) {
++		edvd_offset = data->cpus[cpu].edvd_offset;
++		writel(edvd_val, data->regs + edvd_offset);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
+index f0eddb7854e5d8..15e843b4854d0a 100644
+--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
++++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
+@@ -346,7 +346,7 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+ 
+ 	} else {
+ 		dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+-			     DMA_TO_DEVICE);
++			     DMA_FROM_DEVICE);
+ 		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ 			     DMA_TO_DEVICE);
+ 	}
+diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
+index 099b32a10dd753..d234495f1115b8 100644
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -548,7 +548,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
+ 
+ 	if (err && (dd->flags & TDES_FLAGS_FAST)) {
+ 		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+-		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
++		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ 	}
+ 
+ 	return err;
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+index 29c9537216fa6d..8f3406289df430 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+@@ -252,7 +252,7 @@ static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+ 	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
+ 	struct rk_crypto_info *rkc = rctx->dev;
+ 
+-	dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
++	dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ }
+ 
+ static int rk_hash_run(struct crypto_engine *engine, void *breq)
+diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
+index 9a2656d73600b5..b39f395c077eeb 100644
+--- a/drivers/firmware/meson/meson_sm.c
++++ b/drivers/firmware/meson/meson_sm.c
+@@ -225,11 +225,16 @@ EXPORT_SYMBOL(meson_sm_call_write);
+ struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
+ {
+ 	struct platform_device *pdev = of_find_device_by_node(sm_node);
++	struct meson_sm_firmware *fw;
+ 
+ 	if (!pdev)
+ 		return NULL;
+ 
+-	return platform_get_drvdata(pdev);
++	fw = platform_get_drvdata(pdev);
++
++	put_device(&pdev->dev);
++
++	return fw;
+ }
+ EXPORT_SYMBOL_GPL(meson_sm_get);
+ 
+diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
+index cfa7b0a50c8e33..03b16b8f639ad0 100644
+--- a/drivers/gpio/gpio-wcd934x.c
++++ b/drivers/gpio/gpio-wcd934x.c
+@@ -102,7 +102,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
+ 	chip->base = -1;
+ 	chip->ngpio = WCD934X_NPINS;
+ 	chip->label = dev_name(dev);
+-	chip->can_sleep = false;
++	chip->can_sleep = true;
+ 
+ 	return devm_gpiochip_add_data(dev, chip, data);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index 670d5ab9d9984f..f97c1826770825 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration(
+ 	REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
+ 
+ 	if (data->taps.h_taps + data->taps.v_taps <= 2) {
+-		/* Set bypass */
+-
+-		/* DCE6 has no SCL_MODE register, skip scale mode programming */
++		/* Disable scaler functionality */
++		REG_WRITE(SCL_SCALER_ENABLE, 0);
+ 
++		/* Clear registers that can cause glitches even when the scaler is off */
++		REG_WRITE(SCL_TAP_CONTROL, 0);
++		REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
++		REG_WRITE(SCL_F_SHARP_CONTROL, 0);
+ 		return false;
+ 	}
+ 
+@@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration(
+ 			SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
+ 			SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
+ 
+-	/* DCE6 has no SCL_MODE register, skip scale mode programming */
++	REG_WRITE(SCL_SCALER_ENABLE, 1);
+ 
+ 	/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
+ 
+@@ -502,6 +505,8 @@ static void dce60_transform_set_scaler(
+ 	REG_SET(DC_LB_MEM_SIZE, 0,
+ 		DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+ 
++	REG_WRITE(SCL_UPDATE, 0x00010000);
++
+ 	/* Clear SCL_F_SHARP_CONTROL value to 0 */
+ 	REG_WRITE(SCL_F_SHARP_CONTROL, 0);
+ 
+@@ -527,8 +532,7 @@ static void dce60_transform_set_scaler(
+ 		if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
+ 			/* 4. Program vertical filters */
+ 			if (xfm_dce->filter_v == NULL)
+-				REG_SET(SCL_VERT_FILTER_CONTROL, 0,
+-						SCL_V_2TAP_HARDCODE_COEF_EN, 0);
++				REG_WRITE(SCL_VERT_FILTER_CONTROL, 0);
+ 			program_multi_taps_filter(
+ 					xfm_dce,
+ 					data->taps.v_taps,
+@@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(
+ 
+ 			/* 5. Program horizontal filters */
+ 			if (xfm_dce->filter_h == NULL)
+-				REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
+-						SCL_H_2TAP_HARDCODE_COEF_EN, 0);
++				REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0);
+ 			program_multi_taps_filter(
+ 					xfm_dce,
+ 					data->taps.h_taps,
+@@ -566,6 +569,8 @@ static void dce60_transform_set_scaler(
+ 	/* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
+ 
+ 	/* DCE6 DATA_FORMAT register does not support ALPHA_EN */
++
++	REG_WRITE(SCL_UPDATE, 0);
+ }
+ #endif
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+index cbce194ec7b82b..eb716e8337e236 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+@@ -155,6 +155,9 @@
+ 	SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
+ 	SRI(VIEWPORT_START, SCL, id), \
+ 	SRI(VIEWPORT_SIZE, SCL, id), \
++	SRI(SCL_SCALER_ENABLE, SCL, id), \
++	SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \
++	SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \
+ 	SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
+ 	SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
+ 	SRI(SCL_VERT_FILTER_INIT, SCL, id), \
+@@ -590,6 +593,7 @@ struct dce_transform_registers {
+ 	uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+ 	uint32_t SCL_HORZ_FILTER_INIT;
+ #if defined(CONFIG_DRM_AMD_DC_SI)
++	uint32_t SCL_SCALER_ENABLE;
+ 	uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;
+ 	uint32_t SCL_HORZ_FILTER_INIT_CHROMA;
+ #endif
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+index 9de01ae574c035..067eddd9c62d80 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+@@ -4115,6 +4115,7 @@
+ #define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55
+ #define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40
+ #define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41
++#define mmSCL0_SCL_SCALER_ENABLE 0x1B42
+ #define mmSCL0_SCL_CONTROL 0x1B44
+ #define mmSCL0_SCL_DEBUG 0x1B6A
+ #define mmSCL0_SCL_DEBUG2 0x1B69
+@@ -4144,6 +4145,7 @@
+ #define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55
+ #define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40
+ #define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41
++#define mmSCL1_SCL_SCALER_ENABLE 0x1E42
+ #define mmSCL1_SCL_CONTROL 0x1E44
+ #define mmSCL1_SCL_DEBUG 0x1E6A
+ #define mmSCL1_SCL_DEBUG2 0x1E69
+@@ -4173,6 +4175,7 @@
+ #define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
+ #define mmSCL2_SCL_COEF_RAM_SELECT 0x4140
+ #define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141
++#define mmSCL2_SCL_SCALER_ENABLE 0x4142
+ #define mmSCL2_SCL_CONTROL 0x4144
+ #define mmSCL2_SCL_DEBUG 0x416A
+ #define mmSCL2_SCL_DEBUG2 0x4169
+@@ -4202,6 +4205,7 @@
+ #define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455
+ #define mmSCL3_SCL_COEF_RAM_SELECT 0x4440
+ #define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441
++#define mmSCL3_SCL_SCALER_ENABLE 0x4442
+ #define mmSCL3_SCL_CONTROL 0x4444
+ #define mmSCL3_SCL_DEBUG 0x446A
+ #define mmSCL3_SCL_DEBUG2 0x4469
+@@ -4231,6 +4235,7 @@
+ #define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755
+ #define mmSCL4_SCL_COEF_RAM_SELECT 0x4740
+ #define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741
++#define mmSCL4_SCL_SCALER_ENABLE 0x4742
+ #define mmSCL4_SCL_CONTROL 0x4744
+ #define mmSCL4_SCL_DEBUG 0x476A
+ #define mmSCL4_SCL_DEBUG2 0x4769
+@@ -4260,6 +4265,7 @@
+ #define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55
+ #define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40
+ #define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41
++#define mmSCL5_SCL_SCALER_ENABLE 0x4A42
+ #define mmSCL5_SCL_CONTROL 0x4A44
+ #define mmSCL5_SCL_DEBUG 0x4A6A
+ #define mmSCL5_SCL_DEBUG2 0x4A69
+@@ -4287,6 +4293,7 @@
+ #define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55
+ #define mmSCL_COEF_RAM_SELECT 0x1B40
+ #define mmSCL_COEF_RAM_TAP_DATA 0x1B41
++#define mmSCL_SCALER_ENABLE 0x1B42
+ #define mmSCL_CONTROL 0x1B44
+ #define mmSCL_DEBUG 0x1B6A
+ #define mmSCL_DEBUG2 0x1B69
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+index bd8085ec54ed57..da5596fbfdcb31 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+@@ -8648,6 +8648,8 @@
+ #define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000
+ #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L
+ #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000
++#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L
++#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000
+ #define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L
+ #define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000
+ #define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 036ee034397283..11f164a15d7c21 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -836,7 +836,7 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
+ 		nvif_vmm_put(vmm, &old_mem->vma[1]);
+ 		nvif_vmm_put(vmm, &old_mem->vma[0]);
+ 	}
+-	return 0;
++	return ret;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+index 92f4261305bd9d..f2ae5d17ea601e 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+@@ -576,7 +576,10 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
+ 	udelay(10);
+ 	rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
+ 
+-	ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
++	rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK);
++	rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1);
++
++	ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN;
+ 	rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
+ 
+ 	rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+index a6b276f1d6ee15..a54c7eb4113b93 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+@@ -12,6 +12,9 @@
+ #define LINKSR_LPBUSY			(1 << 1)
+ #define LINKSR_HSBUSY			(1 << 0)
+ 
++#define TXSETR				0x100
++#define TXSETR_LANECNT_MASK		(0x3 << 0)
++
+ /*
+  * Video Mode Register
+  */
+@@ -80,10 +83,7 @@
+  * PHY-Protocol Interface (PPI) Registers
+  */
+ #define PPISETR				0x700
+-#define PPISETR_DLEN_0			(0x1 << 0)
+-#define PPISETR_DLEN_1			(0x3 << 0)
+-#define PPISETR_DLEN_2			(0x7 << 0)
+-#define PPISETR_DLEN_3			(0xf << 0)
++#define PPISETR_DLEN_MASK		(0xf << 0)
+ #define PPISETR_CLEN			(1 << 8)
+ 
+ #define PPICLCR				0x710
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index b129ce873af3f2..b235e7cc41f3f8 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1514,6 +1514,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ 		       SVGA3dCmdHeader *header)
+ {
+ 	struct vmw_bo *vmw_bo = NULL;
++	struct vmw_resource *res;
+ 	struct vmw_surface *srf = NULL;
+ 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
+ 	int ret;
+@@ -1549,18 +1550,24 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ 
+ 	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
+ 		VMW_RES_DIRTY_SET : 0;
+-	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+-				dirty, user_surface_converter,
+-				&cmd->body.host.sid, NULL);
++	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty,
++				user_surface_converter, &cmd->body.host.sid,
++				NULL);
+ 	if (unlikely(ret != 0)) {
+ 		if (unlikely(ret != -ERESTARTSYS))
+ 			VMW_DEBUG_USER("could not find surface for DMA.\n");
+ 		return ret;
+ 	}
+ 
+-	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
++	res = sw_context->res_cache[vmw_res_surface].res;
++	if (!res) {
++		VMW_DEBUG_USER("Invalid DMA surface.\n");
++		return -EINVAL;
++	}
+ 
+-	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
++	srf = vmw_res_to_srf(res);
++	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo,
++			     header);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+index aaacbdcbd742fb..946f166d6fc765 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+@@ -326,8 +326,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+ 		hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
+ 	}
+ 	node->res = vmw_resource_reference_unless_doomed(res);
+-	if (!node->res)
++	if (!node->res) {
++		hash_del_rcu(&node->hash.head);
+ 		return -ESRCH;
++	}
+ 
+ 	node->first_usage = 1;
+ 	if (!res->dev_priv->has_mob) {
+@@ -654,7 +656,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
+ 		hash_del_rcu(&val->hash.head);
+ 
+ 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
+-		hash_del_rcu(&entry->hash.head);
++		hash_del_rcu(&val->hash.head);
+ 
+ 	ctx->sw_context = NULL;
+ }
+diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
+index f52abf759260f2..5de86a953d4182 100644
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -118,7 +118,7 @@
+ #define AMS_ALARM_THRESHOLD_OFF_10	0x10
+ #define AMS_ALARM_THRESHOLD_OFF_20	0x20
+ 
+-#define AMS_ALARM_THR_DIRECT_MASK	BIT(1)
++#define AMS_ALARM_THR_DIRECT_MASK	BIT(0)
+ #define AMS_ALARM_THR_MIN		0x0000
+ #define AMS_ALARM_THR_MAX		(BIT(16) - 1)
+ 
+@@ -385,6 +385,29 @@ static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
+ 	ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
+ }
+ 
++static void ams_unmask(struct ams *ams)
++{
++	unsigned int status, unmask;
++
++	status = readl(ams->base + AMS_ISR_0);
++
++	/* Clear those bits which are not active anymore */
++	unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
++
++	/* Clear status of disabled alarm */
++	unmask |= ams->intr_mask;
++
++	ams->current_masked_alarm &= status;
++
++	/* Also clear those which are masked out anyway */
++	ams->current_masked_alarm &= ~ams->intr_mask;
++
++	/* Clear the interrupts before we unmask them */
++	writel(unmask, ams->base + AMS_ISR_0);
++
++	ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
++}
++
+ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+ {
+ 	unsigned long flags;
+@@ -397,6 +420,7 @@ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+ 
+ 	spin_lock_irqsave(&ams->intr_lock, flags);
+ 	ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
++	ams_unmask(ams);
+ 	spin_unlock_irqrestore(&ams->intr_lock, flags);
+ }
+ 
+@@ -1025,28 +1049,9 @@ static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
+ static void ams_unmask_worker(struct work_struct *work)
+ {
+ 	struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+-	unsigned int status, unmask;
+ 
+ 	spin_lock_irq(&ams->intr_lock);
+-
+-	status = readl(ams->base + AMS_ISR_0);
+-
+-	/* Clear those bits which are not active anymore */
+-	unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+-
+-	/* Clear status of disabled alarm */
+-	unmask |= ams->intr_mask;
+-
+-	ams->current_masked_alarm &= status;
+-
+-	/* Also clear those which are masked out anyway */
+-	ams->current_masked_alarm &= ~ams->intr_mask;
+-
+-	/* Clear the interrupts before we unmask them */
+-	writel(unmask, ams->base + AMS_ISR_0);
+-
+-	ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+-
++	ams_unmask(ams);
+ 	spin_unlock_irq(&ams->intr_lock);
+ 
+ 	/* If still pending some alarm re-trigger the timer */
+diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
+index e0b7f658d61196..cf9cf90cd6e276 100644
+--- a/drivers/iio/dac/ad5360.c
++++ b/drivers/iio/dac/ad5360.c
+@@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ 	unsigned int clr)
+ {
+ 	struct ad5360_state *st = iio_priv(indio_dev);
+-	unsigned int ret;
++	int ret;
+ 
+ 	mutex_lock(&st->lock);
+ 
+diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
+index 7644acfd879e04..9228e3cee1b850 100644
+--- a/drivers/iio/dac/ad5421.c
++++ b/drivers/iio/dac/ad5421.c
+@@ -186,7 +186,7 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ 	unsigned int clr)
+ {
+ 	struct ad5421_state *st = iio_priv(indio_dev);
+-	unsigned int ret;
++	int ret;
+ 
+ 	mutex_lock(&st->lock);
+ 
+diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
+index 4abf80f75ef5d9..03e75261e891fe 100644
+--- a/drivers/iio/frequency/adf4350.c
++++ b/drivers/iio/frequency/adf4350.c
+@@ -143,6 +143,19 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
+ 	if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
+ 		return -EINVAL;
+ 
++	st->r4_rf_div_sel = 0;
++
++	/*
++	 * TODO: The computation below makes sure we get a power-of-2
++	 * shift (st->r4_rf_div_sel) so that freq becomes greater than or
++	 * equal to ADF4350_MIN_VCO_FREQ. This might be simplified with
++	 * fls()/fls_long() and friends.
++	 */
++	while (freq < ADF4350_MIN_VCO_FREQ) {
++		freq <<= 1;
++		st->r4_rf_div_sel++;
++	}
++
+ 	if (freq > ADF4350_MAX_FREQ_45_PRESC) {
+ 		prescaler = ADF4350_REG1_PRESCALER;
+ 		mdiv = 75;
+@@ -151,13 +164,6 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
+ 		mdiv = 23;
+ 	}
+ 
+-	st->r4_rf_div_sel = 0;
+-
+-	while (freq < ADF4350_MIN_VCO_FREQ) {
+-		freq <<= 1;
+-		st->r4_rf_div_sel++;
+-	}
+-
+ 	/*
+ 	 * Allow a predefined reference division factor
+ 	 * if not set, compute our own
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+index 91c181bb92869d..a1f055014cc652 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -747,10 +747,6 @@ static int inv_icm42600_resume(struct device *dev)
+ 	if (ret)
+ 		goto out_unlock;
+ 
+-	pm_runtime_disable(dev);
+-	pm_runtime_set_active(dev);
+-	pm_runtime_enable(dev);
+-
+ 	/* restore sensors state */
+ 	ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro,
+ 					 st->suspended.accel,
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 88bccdbb0bed08..ac7b8fb36db8fb 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4415,7 +4415,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
+ 			}
+ 
+ 			if (info->ats_supported && ecap_prs(iommu->ecap) &&
+-			    pci_pri_supported(pdev))
++			    ecap_pds(iommu->ecap) && pci_pri_supported(pdev))
+ 				info->pri_supported = 1;
+ 		}
+ 	}
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index 57289966915492..2d20cf9d84cead 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -248,11 +248,11 @@ static int plic_irq_suspend(void)
+ 
+ 	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
+ 
+-	for (i = 0; i < priv->nr_irqs; i++)
+-		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
+-			__set_bit(i, priv->prio_save);
+-		else
+-			__clear_bit(i, priv->prio_save);
++	/* irq ID 0 is reserved */
++	for (i = 1; i < priv->nr_irqs; i++) {
++		__assign_bit(i, priv->prio_save,
++			     readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
++	}
+ 
+ 	for_each_cpu(cpu, cpu_present_mask) {
+ 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+@@ -279,7 +279,8 @@ static void plic_irq_resume(void)
+ 
+ 	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
+ 
+-	for (i = 0; i < priv->nr_irqs; i++) {
++	/* irq ID 0 is reserved */
++	for (i = 1; i < priv->nr_irqs; i++) {
+ 		index = BIT_WORD(i);
+ 		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
+ 		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index e4fcac97dbfaa0..90f248ef2a1fcc 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -616,11 +616,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+ 	i = pdata->num_mboxes;
+ 	for (; i >= 0; i--) {
+ 		ipi_mbox = &pdata->ipi_mboxes[i];
+-		if (ipi_mbox->dev.parent) {
+-			mbox_controller_unregister(&ipi_mbox->mbox);
+-			if (device_is_registered(&ipi_mbox->dev))
+-				device_unregister(&ipi_mbox->dev);
+-		}
++		if (device_is_registered(&ipi_mbox->dev))
++			device_unregister(&ipi_mbox->dev);
+ 	}
+ }
+ 
+diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
+index 1f7edc0f5b1abe..a184fd9fc6f3b3 100644
+--- a/drivers/media/i2c/mt9v111.c
++++ b/drivers/media/i2c/mt9v111.c
+@@ -534,8 +534,8 @@ static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111,
+ static int mt9v111_hw_config(struct mt9v111_dev *mt9v111)
+ {
+ 	struct i2c_client *c = mt9v111->client;
+-	unsigned int ret;
+ 	u16 outfmtctrl2;
++	int ret;
+ 
+ 	/* Force device reset. */
+ 	ret = __mt9v111_hw_reset(mt9v111);
+diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
+index 94abd042045dab..6fd8885f5928e4 100644
+--- a/drivers/media/mc/mc-devnode.c
++++ b/drivers/media/mc/mc-devnode.c
+@@ -50,11 +50,6 @@ static void media_devnode_release(struct device *cd)
+ {
+ 	struct media_devnode *devnode = to_media_devnode(cd);
+ 
+-	mutex_lock(&media_devnode_lock);
+-	/* Mark device node number as free */
+-	clear_bit(devnode->minor, media_devnode_nums);
+-	mutex_unlock(&media_devnode_lock);
+-
+ 	/* Release media_devnode and perform other cleanups as needed. */
+ 	if (devnode->release)
+ 		devnode->release(devnode);
+@@ -283,6 +278,7 @@ void media_devnode_unregister(struct media_devnode *devnode)
+ 	/* Delete the cdev on this minor as well */
+ 	cdev_device_del(&devnode->cdev, &devnode->dev);
+ 	devnode->media_dev = NULL;
++	clear_bit(devnode->minor, media_devnode_nums);
+ 	mutex_unlock(&media_devnode_lock);
+ 
+ 	put_device(&devnode->dev);
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 951b79ca125cdd..f885e718524c22 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -678,7 +678,7 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ 		 * (already discovered through iterating over links) and pads
+ 		 * not internally connected.
+ 		 */
+-		if (origin == local || !local->num_links ||
++		if (origin == local || local->num_links ||
+ 		    !media_entity_has_pad_interdep(origin->entity, origin->index,
+ 						   local->index))
+ 			continue;
+diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
+index 013694bfcb1c1b..7cbb2d5869320b 100644
+--- a/drivers/media/pci/cx18/cx18-queue.c
++++ b/drivers/media/pci/cx18/cx18-queue.c
+@@ -379,15 +379,22 @@ int cx18_stream_alloc(struct cx18_stream *s)
+ 			break;
+ 		}
+ 
++		buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
++						 buf->buf, s->buf_size,
++						 s->dma);
++		if (dma_mapping_error(&s->cx->pci_dev->dev, buf->dma_handle)) {
++			kfree(buf->buf);
++			kfree(mdl);
++			kfree(buf);
++			break;
++		}
++
+ 		INIT_LIST_HEAD(&mdl->list);
+ 		INIT_LIST_HEAD(&mdl->buf_list);
+ 		mdl->id = s->mdl_base_idx; /* a somewhat safe value */
+ 		cx18_enqueue(s, mdl, &s->q_idle);
+ 
+ 		INIT_LIST_HEAD(&buf->list);
+-		buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
+-						 buf->buf, s->buf_size,
+-						 s->dma);
+ 		cx18_buf_sync_for_cpu(s, buf);
+ 		list_add_tail(&buf->list, &s->buf_pool);
+ 	}
+diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
+index b7aaa8b4a7841d..e39bf64c5c715b 100644
+--- a/drivers/media/pci/ivtv/ivtv-irq.c
++++ b/drivers/media/pci/ivtv/ivtv-irq.c
+@@ -351,7 +351,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
+ 
+ 	/* Insert buffer block for YUV if needed */
+ 	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
+-		if (yi->blanking_dmaptr) {
++		if (yi->blanking_ptr) {
+ 			s->sg_pending[idx].src = yi->blanking_dmaptr;
+ 			s->sg_pending[idx].dst = offset;
+ 			s->sg_pending[idx].size = 720 * 16;
+diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
+index 2d9274537725af..71f0401066471a 100644
+--- a/drivers/media/pci/ivtv/ivtv-yuv.c
++++ b/drivers/media/pci/ivtv/ivtv-yuv.c
+@@ -125,7 +125,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
+ 	ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
+ 
+ 	/* If we've offset the y plane, ensure top area is blanked */
+-	if (f->offset_y && yi->blanking_dmaptr) {
++	if (f->offset_y && yi->blanking_ptr) {
+ 		dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
+ 		dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
+ 		dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
+@@ -929,6 +929,12 @@ static void ivtv_yuv_init(struct ivtv *itv)
+ 		yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
+ 						     yi->blanking_ptr,
+ 						     720 * 16, DMA_TO_DEVICE);
++		if (dma_mapping_error(&itv->pdev->dev, yi->blanking_dmaptr)) {
++			kfree(yi->blanking_ptr);
++			yi->blanking_ptr = NULL;
++			yi->blanking_dmaptr = 0;
++			IVTV_DEBUG_WARN("Failed to dma_map yuv blanking buffer\n");
++		}
+ 	} else {
+ 		yi->blanking_dmaptr = 0;
+ 		IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
+diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
+index fe7da2b3048299..01461b2fed51d8 100644
+--- a/drivers/media/platform/qcom/venus/firmware.c
++++ b/drivers/media/platform/qcom/venus/firmware.c
+@@ -30,7 +30,7 @@ static void venus_reset_cpu(struct venus_core *core)
+ 	u32 fw_size = core->fw.mapped_mem_size;
+ 	void __iomem *wrapper_base;
+ 
+-	if (IS_IRIS2_1(core))
++	if (IS_IRIS2(core) || IS_IRIS2_1(core))
+ 		wrapper_base = core->wrapper_tz_base;
+ 	else
+ 		wrapper_base = core->wrapper_base;
+@@ -42,7 +42,7 @@ static void venus_reset_cpu(struct venus_core *core)
+ 	writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
+ 	writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
+ 
+-	if (IS_IRIS2_1(core)) {
++	if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
+ 		/* Bring XTSS out of reset */
+ 		writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
+ 	} else {
+@@ -68,7 +68,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
+ 	if (resume) {
+ 		venus_reset_cpu(core);
+ 	} else {
+-		if (IS_IRIS2_1(core))
++		if (IS_IRIS2(core) || IS_IRIS2_1(core))
+ 			writel(WRAPPER_XTSS_SW_RESET_BIT,
+ 			       core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ 		else
+@@ -181,7 +181,7 @@ static int venus_shutdown_no_tz(struct venus_core *core)
+ 	void __iomem *wrapper_base = core->wrapper_base;
+ 	void __iomem *wrapper_tz_base = core->wrapper_tz_base;
+ 
+-	if (IS_IRIS2_1(core)) {
++	if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
+ 		/* Assert the reset to XTSS */
+ 		reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ 		reg |= WRAPPER_XTSS_SW_RESET_BIT;
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index f8901d6fbe9bf1..ff3088691d5e18 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -735,11 +735,11 @@ int lirc_register(struct rc_dev *dev)
+ 
+ 	cdev_init(&dev->lirc_cdev, &lirc_fops);
+ 
++	get_device(&dev->dev);
++
+ 	err = cdev_device_add(&dev->lirc_cdev, &dev->lirc_dev);
+ 	if (err)
+-		goto out_ida;
+-
+-	get_device(&dev->dev);
++		goto out_put_device;
+ 
+ 	switch (dev->driver_type) {
+ 	case RC_DRIVER_SCANCODE:
+@@ -763,7 +763,8 @@ int lirc_register(struct rc_dev *dev)
+ 
+ 	return 0;
+ 
+-out_ida:
++out_put_device:
++	put_device(&dev->lirc_dev);
+ 	ida_free(&lirc_ida, minor);
+ 	return err;
+ }
+diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
+index e73dd330af477d..d913fb901973f0 100644
+--- a/drivers/memory/samsung/exynos-srom.c
++++ b/drivers/memory/samsung/exynos-srom.c
+@@ -121,20 +121,18 @@ static int exynos_srom_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	srom->dev = dev;
+-	srom->reg_base = of_iomap(np, 0);
+-	if (!srom->reg_base) {
++	srom->reg_base = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(srom->reg_base)) {
+ 		dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
+-		return -ENOMEM;
++		return PTR_ERR(srom->reg_base);
+ 	}
+ 
+ 	platform_set_drvdata(pdev, srom);
+ 
+ 	srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
+ 						      ARRAY_SIZE(exynos_srom_offsets));
+-	if (!srom->reg_offset) {
+-		iounmap(srom->reg_base);
++	if (!srom->reg_offset)
+ 		return -ENOMEM;
+-	}
+ 
+ 	for_each_child_of_node(np, child) {
+ 		if (exynos_srom_configure_bank(srom, child)) {
+diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+index 992855bfda3e47..6daf33e07ea0a8 100644
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -81,8 +81,9 @@ static struct mfd_cell chtdc_ti_dev[] = {
+ static const struct regmap_config chtdc_ti_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+-	.max_register = 128,
+-	.cache_type = REGCACHE_NONE,
++	.max_register = 0xff,
++	/* The hardware does not support reading multiple registers at once */
++	.use_single_read = true,
+ };
+ 
+ static const struct regmap_irq chtdc_ti_irqs[] = {
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 5e03a6ba55711a..a85442d74e0928 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -322,11 +322,11 @@ static void fastrpc_free_map(struct kref *ref)
+ 
+ 			perm.vmid = QCOM_SCM_VMID_HLOS;
+ 			perm.perm = QCOM_SCM_PERM_RWX;
+-			err = qcom_scm_assign_mem(map->phys, map->size,
++			err = qcom_scm_assign_mem(map->phys, map->len,
+ 				&src_perms, &perm, 1);
+ 			if (err) {
+-				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
+-						map->phys, map->size, err);
++				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
++						map->phys, map->len, err);
+ 				return;
+ 			}
+ 		}
+@@ -752,7 +752,8 @@ static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
+ 	struct fastrpc_session_ctx *sess = fl->sctx;
+ 	struct fastrpc_map *map = NULL;
+ 	struct sg_table *table;
+-	int err = 0;
++	struct scatterlist *sgl = NULL;
++	int err = 0, sgl_index = 0;
+ 
+ 	map = kzalloc(sizeof(*map), GFP_KERNEL);
+ 	if (!map)
+@@ -789,7 +790,15 @@ static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
+ 		map->phys = sg_dma_address(map->table->sgl);
+ 		map->phys += ((u64)fl->sctx->sid << 32);
+ 	}
+-	map->size = len;
++	for_each_sg(map->table->sgl, sgl, map->table->nents,
++		sgl_index)
++		map->size += sg_dma_len(sgl);
++	if (len > map->size) {
++		dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n",
++				len, map->size);
++		err = -EINVAL;
++		goto map_err;
++	}
+ 	map->va = sg_virt(map->table->sgl);
+ 	map->len = len;
+ 
+@@ -806,10 +815,10 @@ static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
+ 		dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
+ 		dst_perms[1].perm = QCOM_SCM_PERM_RWX;
+ 		map->attr = attr;
+-		err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
++		err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2);
+ 		if (err) {
+-			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
+-					map->phys, map->size, err);
++			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
++					map->phys, map->len, err);
+ 			goto map_err;
+ 		}
+ 	}
+@@ -1240,7 +1249,7 @@ static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_reques
+ 		 * that does not support unsigned PD offload
+ 		 */
+ 		if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
+-			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
++			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD\n");
+ 			return true;
+ 		}
+ 	}
+@@ -1304,7 +1313,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+ 							&src_perms,
+ 							fl->cctx->vmperms, fl->cctx->vmcount);
+ 			if (err) {
+-				dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
++				dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
+ 					fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
+ 				goto err_map;
+ 			}
+@@ -1358,7 +1367,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+ 						(u64)fl->cctx->remote_heap->size,
+ 						&src_perms, &dst_perms, 1);
+ 		if (err)
+-			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
++			dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
+ 				fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
+ 	}
+ err_map:
+@@ -2063,7 +2072,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
+ 	args[0].length = sizeof(req_msg);
+ 
+ 	pages.addr = map->phys;
+-	pages.size = map->size;
++	pages.size = map->len;
+ 
+ 	args[1].ptr = (u64) (uintptr_t) &pages;
+ 	args[1].length = sizeof(pages);
+@@ -2078,7 +2087,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
+ 	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
+ 	if (err) {
+ 		dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
+-			req.fd, req.vaddrin, map->size);
++			req.fd, req.vaddrin, map->len);
+ 		goto err_invoke;
+ 	}
+ 
+@@ -2091,7 +2100,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
+ 	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+ 		/* unmap the memory and release the buffer */
+ 		req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
+-		req_unmap.length = map->size;
++		req_unmap.length = map->len;
+ 		fastrpc_req_mem_unmap_impl(fl, &req_unmap);
+ 		return -EFAULT;
+ 	}
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index cb87e827377934..2c58df6855f2a5 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -945,7 +945,11 @@ static void mmc_sdio_remove(struct mmc_host *host)
+  */
+ static int mmc_sdio_alive(struct mmc_host *host)
+ {
+-	return mmc_select_card(host->card);
++	if (!mmc_host_is_spi(host))
++		return mmc_select_card(host->card);
++	else
++		return mmc_io_rw_direct(host->card, 0, 0, SDIO_CCCR_CCCR, 0,
++					NULL);
+ }
+ 
+ /*
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index fe5912d31beea4..b0a70badf3eb77 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -876,10 +876,14 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+ 	if (!of_property_read_u32(np, "bank-width", &val)) {
+ 		if (val == 2) {
+ 			nand->options |= NAND_BUSWIDTH_16;
+-		} else if (val != 1) {
++		} else if (val == 1) {
++			nand->options |= NAND_BUSWIDTH_AUTO;
++		} else {
+ 			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+ 			return -EINVAL;
+ 		}
++	} else {
++		nand->options |= NAND_BUSWIDTH_AUTO;
+ 	}
+ 
+ 	if (of_property_read_bool(np, "nand-skip-bbtscan"))
+diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+index eee675a25b2c3c..ef9adecb639aec 100644
+--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
++++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+@@ -483,10 +483,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
+ 					"missing 'reg' property in node %pOF\n",
+ 					tbi);
+ 				err = -EBUSY;
++				of_node_put(tbi);
+ 				goto error;
+ 			}
+ 			set_tbipa(*prop, pdev,
+ 				  data->get_tbipa, priv->map, &res);
++			of_node_put(tbi);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 33bbcced81059a..275561272721e4 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1177,9 +1177,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+ 				mlx4_unregister_mac(mdev->dev, priv->port, mac);
+ 
+ 				hlist_del_rcu(&entry->hlist);
+-				kfree_rcu(entry, rcu);
+ 				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
+ 				       entry->mac, priv->port);
++				kfree_rcu(entry, rcu);
+ 				++removed;
+ 			}
+ 		}
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index 0e8ff839cae234..3a340cb2b205f4 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -1763,14 +1763,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
+ 	mutex_unlock(&ab->core_lock);
+ 
+ 	ath11k_dp_free(ab);
+-	ath11k_hal_srng_deinit(ab);
++	ath11k_hal_srng_clear(ab);
+ 
+ 	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+ 
+-	ret = ath11k_hal_srng_init(ab);
+-	if (ret)
+-		return ret;
+-
+ 	clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ 
+ 	ret = ath11k_core_qmi_firmware_ready(ab);
+diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
+index 1215408d1a6abb..79cf65bc2e1724 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.c
++++ b/drivers/net/wireless/ath/ath11k/hal.c
+@@ -1356,6 +1356,22 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
+ }
+ EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+ 
++void ath11k_hal_srng_clear(struct ath11k_base *ab)
++{
++	/* No need to memset rdp and wrp memory since each individual
++	 * segment would get cleared in ath11k_hal_srng_src_hw_init()
++	 * and ath11k_hal_srng_dst_hw_init().
++	 */
++	memset(ab->hal.srng_list, 0,
++	       sizeof(ab->hal.srng_list));
++	memset(ab->hal.shadow_reg_addr, 0,
++	       sizeof(ab->hal.shadow_reg_addr));
++	ab->hal.avail_blk_resource = 0;
++	ab->hal.current_blk_index = 0;
++	ab->hal.num_shadow_reg_configured = 0;
++}
++EXPORT_SYMBOL(ath11k_hal_srng_clear);
++
+ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
+ {
+ 	struct hal_srng *srng;
+diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
+index 80447f488954a8..aa21eb1fdce159 100644
+--- a/drivers/net/wireless/ath/ath11k/hal.h
++++ b/drivers/net/wireless/ath/ath11k/hal.h
+@@ -962,6 +962,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ 			  struct hal_srng_params *params);
+ int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+ void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
++void ath11k_hal_srng_clear(struct ath11k_base *ab);
+ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
+ void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ 				       u32 **cfg, u32 *len);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+index 13e892d788b277..92281811103ef9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
+ 	/* Netgear, Inc. [A8000,AXE3000] */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
++	/* Netgear, Inc. A7500 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9065, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ 	/* TP-Link TXE50UH */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 136dba6221d86a..053385c84bf848 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2986,10 +2986,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ 		 * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
+ 		 * because of high power consumption (> 2 Watt) in s2idle
+ 		 * sleep. Only some boards with Intel CPU are affected.
++		 * (Note for testing: Samsung 990 Evo Plus has same PCI ID)
+ 		 */
+ 		if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
+ 		    dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ 		    dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
++		    dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index 5bfec440b4fd70..aae4e8ef9e3654 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -3957,6 +3957,7 @@ static int of_unittest_pci_node_verify(struct pci_dev *pdev, bool add)
+ 		unittest(!np, "Child device tree node is not removed\n");
+ 		child_dev = device_find_any_child(&pdev->dev);
+ 		unittest(!child_dev, "Child device is not removed\n");
++		put_device(child_dev);
+ 	}
+ 
+ failed:
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index bf9a961c9f2766..9055ce34c636bd 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -1213,8 +1213,8 @@ static int ks_pcie_probe(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		return irq;
+ 
+-	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+-			  "ks-pcie-error-irq", ks_pcie);
++	ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED,
++			       "ks-pcie-error-irq", ks_pcie);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to request error IRQ %d\n",
+ 			irq);
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 115a21cf869ae6..c7d3e248a59a20 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -1218,6 +1218,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+ 	struct mrq_uphy_response resp;
+ 	struct tegra_bpmp_message msg;
+ 	struct mrq_uphy_request req;
++	int err;
+ 
+ 	/*
+ 	 * Controller-5 doesn't need to have its state set by BPMP-FW in
+@@ -1240,7 +1241,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+ 	msg.rx.data = &resp;
+ 	msg.rx.size = sizeof(resp);
+ 
+-	return tegra_bpmp_transfer(pcie->bpmp, &msg);
++	err = tegra_bpmp_transfer(pcie->bpmp, &msg);
++	if (err)
++		return err;
++	if (msg.rx.ret)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+@@ -1249,6 +1256,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ 	struct mrq_uphy_response resp;
+ 	struct tegra_bpmp_message msg;
+ 	struct mrq_uphy_request req;
++	int err;
+ 
+ 	memset(&req, 0, sizeof(req));
+ 	memset(&resp, 0, sizeof(resp));
+@@ -1268,7 +1276,13 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ 	msg.rx.data = &resp;
+ 	msg.rx.size = sizeof(resp);
+ 
+-	return tegra_bpmp_transfer(pcie->bpmp, &msg);
++	err = tegra_bpmp_transfer(pcie->bpmp, &msg);
++	if (err)
++		return err;
++	if (msg.rx.ret)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+@@ -1963,10 +1977,10 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+ 
+ static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+ {
+-	if (unlikely(irq > 31))
++	if (unlikely(irq > 32))
+ 		return -EINVAL;
+ 
+-	appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
++	appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 80d975dcb2ee27..da37569460b1ad 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -14,6 +14,7 @@
+  */
+ 
+ #include <linux/clk.h>
++#include <linux/cleanup.h>
+ #include <linux/debugfs.h>
+ #include <linux/delay.h>
+ #include <linux/export.h>
+@@ -269,7 +270,7 @@ struct tegra_msi {
+ 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ 	struct irq_domain *domain;
+ 	struct mutex map_lock;
+-	spinlock_t mask_lock;
++	raw_spinlock_t mask_lock;
+ 	void *virt;
+ 	dma_addr_t phys;
+ 	int irq;
+@@ -1604,14 +1605,13 @@ static void tegra_msi_irq_mask(struct irq_data *d)
+ 	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ 	struct tegra_pcie *pcie = msi_to_pcie(msi);
+ 	unsigned int index = d->hwirq / 32;
+-	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&msi->mask_lock, flags);
+-	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+-	value &= ~BIT(d->hwirq % 32);
+-	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+-	spin_unlock_irqrestore(&msi->mask_lock, flags);
++	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
++		value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
++		value &= ~BIT(d->hwirq % 32);
++		afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
++	}
+ }
+ 
+ static void tegra_msi_irq_unmask(struct irq_data *d)
+@@ -1619,14 +1619,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
+ 	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
+ 	struct tegra_pcie *pcie = msi_to_pcie(msi);
+ 	unsigned int index = d->hwirq / 32;
+-	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&msi->mask_lock, flags);
+-	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+-	value |= BIT(d->hwirq % 32);
+-	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+-	spin_unlock_irqrestore(&msi->mask_lock, flags);
++	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
++		value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
++		value |= BIT(d->hwirq % 32);
++		afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
++	}
+ }
+ 
+ static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+@@ -1742,7 +1741,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
+ 	int err;
+ 
+ 	mutex_init(&msi->map_lock);
+-	spin_lock_init(&msi->mask_lock);
++	raw_spin_lock_init(&msi->mask_lock);
+ 
+ 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ 		err = tegra_allocate_domains(msi);
+diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
+index 704ab5d723a959..77f6366e7db107 100644
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -12,6 +12,7 @@
+  */
+ 
+ #include <linux/bitops.h>
++#include <linux/cleanup.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+ #include <linux/delay.h>
+@@ -36,7 +37,7 @@ struct rcar_msi {
+ 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ 	struct irq_domain *domain;
+ 	struct mutex map_lock;
+-	spinlock_t mask_lock;
++	raw_spinlock_t mask_lock;
+ 	int irq1;
+ 	int irq2;
+ };
+@@ -50,20 +51,13 @@ struct rcar_pcie_host {
+ 	int			(*phy_init_fn)(struct rcar_pcie_host *host);
+ };
+ 
+-static DEFINE_SPINLOCK(pmsr_lock);
+-
+ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+ {
+-	unsigned long flags;
+ 	u32 pmsr, val;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(&pmsr_lock, flags);
+-
+-	if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
+-		ret = -EINVAL;
+-		goto unlock_exit;
+-	}
++	if (!pcie_base || pm_runtime_suspended(pcie_dev))
++		return -EINVAL;
+ 
+ 	pmsr = readl(pcie_base + PMSR);
+ 
+@@ -85,8 +79,6 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+ 		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ 	}
+ 
+-unlock_exit:
+-	spin_unlock_irqrestore(&pmsr_lock, flags);
+ 	return ret;
+ }
+ 
+@@ -633,28 +625,26 @@ static void rcar_msi_irq_mask(struct irq_data *d)
+ {
+ 	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ 	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+-	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&msi->mask_lock, flags);
+-	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+-	value &= ~BIT(d->hwirq);
+-	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+-	spin_unlock_irqrestore(&msi->mask_lock, flags);
++	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
++		value = rcar_pci_read_reg(pcie, PCIEMSIIER);
++		value &= ~BIT(d->hwirq);
++		rcar_pci_write_reg(pcie, value, PCIEMSIIER);
++	}
+ }
+ 
+ static void rcar_msi_irq_unmask(struct irq_data *d)
+ {
+ 	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
+ 	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
+-	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&msi->mask_lock, flags);
+-	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+-	value |= BIT(d->hwirq);
+-	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+-	spin_unlock_irqrestore(&msi->mask_lock, flags);
++	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
++		value = rcar_pci_read_reg(pcie, PCIEMSIIER);
++		value |= BIT(d->hwirq);
++		rcar_pci_write_reg(pcie, value, PCIEMSIIER);
++	}
+ }
+ 
+ static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+@@ -770,7 +760,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
+ 	int err;
+ 
+ 	mutex_init(&msi->map_lock);
+-	spin_lock_init(&msi->mask_lock);
++	raw_spin_lock_init(&msi->mask_lock);
+ 
+ 	err = of_address_to_resource(dev->of_node, 0, &res);
+ 	if (err)
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index ac1dae113f2d9f..4cf20ca25ea74e 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -282,17 +282,20 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
+ 	if (!epf_test->dma_supported)
+ 		return;
+ 
+-	dma_release_channel(epf_test->dma_chan_tx);
+-	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
++	if (epf_test->dma_chan_tx) {
++		dma_release_channel(epf_test->dma_chan_tx);
++		if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
++			epf_test->dma_chan_tx = NULL;
++			epf_test->dma_chan_rx = NULL;
++			return;
++		}
+ 		epf_test->dma_chan_tx = NULL;
+-		epf_test->dma_chan_rx = NULL;
+-		return;
+ 	}
+ 
+-	dma_release_channel(epf_test->dma_chan_rx);
+-	epf_test->dma_chan_rx = NULL;
+-
+-	return;
++	if (epf_test->dma_chan_rx) {
++		dma_release_channel(epf_test->dma_chan_rx);
++		epf_test->dma_chan_rx = NULL;
++	}
+ }
+ 
+ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index 25dbe85c421758..f97c4f0e1c7a3d 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -581,15 +581,18 @@ static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
+ 	if (dev->no_vf_scan)
+ 		return 0;
+ 
++	pci_lock_rescan_remove();
+ 	for (i = 0; i < num_vfs; i++) {
+ 		rc = pci_iov_add_virtfn(dev, i);
+ 		if (rc)
+ 			goto failed;
+ 	}
++	pci_unlock_rescan_remove();
+ 	return 0;
+ failed:
+ 	while (i--)
+ 		pci_iov_remove_virtfn(dev, i);
++	pci_unlock_rescan_remove();
+ 
+ 	return rc;
+ }
+@@ -709,8 +712,10 @@ static void sriov_del_vfs(struct pci_dev *dev)
+ 	struct pci_sriov *iov = dev->sriov;
+ 	int i;
+ 
++	pci_lock_rescan_remove();
+ 	for (i = 0; i < iov->num_VFs; i++)
+ 		pci_iov_remove_virtfn(dev, i);
++	pci_unlock_rescan_remove();
+ }
+ 
+ static void sriov_disable(struct pci_dev *dev)
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 9c59bf03d6579f..1705d2d0ed1268 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -1612,6 +1612,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
+ 	switch (err_type) {
+ 	case PCI_ERS_RESULT_NONE:
+ 	case PCI_ERS_RESULT_CAN_RECOVER:
++	case PCI_ERS_RESULT_NEED_RESET:
+ 		envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
+ 		envp[idx++] = "DEVICE_ONLINE=0";
+ 		break;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 92e8ac85f58e86..449d42744d336f 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -196,8 +196,14 @@ static ssize_t max_link_width_show(struct device *dev,
+ 				   struct device_attribute *attr, char *buf)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
++	ssize_t ret;
+ 
+-	return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
++	/* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
++	pci_config_pm_runtime_get(pdev);
++	ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
++	pci_config_pm_runtime_put(pdev);
++
++	return ret;
+ }
+ static DEVICE_ATTR_RO(max_link_width);
+ 
+@@ -209,7 +215,10 @@ static ssize_t current_link_speed_show(struct device *dev,
+ 	int err;
+ 	enum pci_bus_speed speed;
+ 
++	pci_config_pm_runtime_get(pci_dev);
+ 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
++	pci_config_pm_runtime_put(pci_dev);
++
+ 	if (err)
+ 		return -EINVAL;
+ 
+@@ -226,7 +235,10 @@ static ssize_t current_link_width_show(struct device *dev,
+ 	u16 linkstat;
+ 	int err;
+ 
++	pci_config_pm_runtime_get(pci_dev);
+ 	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
++	pci_config_pm_runtime_put(pci_dev);
++
+ 	if (err)
+ 		return -EINVAL;
+ 
+@@ -242,7 +254,10 @@ static ssize_t secondary_bus_number_show(struct device *dev,
+ 	u8 sec_bus;
+ 	int err;
+ 
++	pci_config_pm_runtime_get(pci_dev);
+ 	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
++	pci_config_pm_runtime_put(pci_dev);
++
+ 	if (err)
+ 		return -EINVAL;
+ 
+@@ -258,7 +273,10 @@ static ssize_t subordinate_bus_number_show(struct device *dev,
+ 	u8 sub_bus;
+ 	int err;
+ 
++	pci_config_pm_runtime_get(pci_dev);
+ 	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
++	pci_config_pm_runtime_put(pci_dev);
++
+ 	if (err)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
+index eeb9ea9044b43d..8e700020ee0bc3 100644
+--- a/drivers/pci/pcie/aer.c
++++ b/drivers/pci/pcie/aer.c
+@@ -38,7 +38,7 @@
+ #define AER_ERROR_SOURCES_MAX		128
+ 
+ #define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
+-#define AER_MAX_TYPEOF_UNCOR_ERRS	27	/* as per PCI_ERR_UNCOR_STATUS*/
++#define AER_MAX_TYPEOF_UNCOR_ERRS	32	/* as per PCI_ERR_UNCOR_STATUS*/
+ 
+ struct aer_err_source {
+ 	unsigned int status;
+@@ -510,11 +510,11 @@ static const char *aer_uncorrectable_error_string[] = {
+ 	"AtomicOpBlocked",		/* Bit Position 24	*/
+ 	"TLPBlockedErr",		/* Bit Position 25	*/
+ 	"PoisonTLPBlocked",		/* Bit Position 26	*/
+-	NULL,				/* Bit Position 27	*/
+-	NULL,				/* Bit Position 28	*/
+-	NULL,				/* Bit Position 29	*/
+-	NULL,				/* Bit Position 30	*/
+-	NULL,				/* Bit Position 31	*/
++	"DMWrReqBlocked",		/* Bit Position 27	*/
++	"IDECheck",			/* Bit Position 28	*/
++	"MisIDETLP",			/* Bit Position 29	*/
++	"PCRC_CHECK",			/* Bit Position 30	*/
++	"TLPXlatBlocked",		/* Bit Position 31	*/
+ };
+ 
+ static const char *aer_agent_string[] = {
+diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
+index 705893b5f7b09b..197191178cddc2 100644
+--- a/drivers/pci/pcie/err.c
++++ b/drivers/pci/pcie/err.c
+@@ -108,6 +108,12 @@ static int report_normal_detected(struct pci_dev *dev, void *data)
+ 	return report_error_detected(dev, pci_channel_io_normal, data);
+ }
+ 
++static int report_perm_failure_detected(struct pci_dev *dev, void *data)
++{
++	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
++	return 0;
++}
++
+ static int report_mmio_enabled(struct pci_dev *dev, void *data)
+ {
+ 	struct pci_driver *pdrv;
+@@ -275,7 +281,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ failed:
+ 	pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+ 
+-	pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
++	pci_walk_bridge(bridge, report_perm_failure_detected, NULL);
+ 
+ 	/* TODO: Should kernel panic here? */
+ 	pci_info(bridge, "device recovery failed\n");
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
+index 9af93e3d8d9ff3..1efbae6c99876e 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
+@@ -352,10 +352,6 @@ extern const struct samsung_pinctrl_of_match_data exynos850_of_data;
+ extern const struct samsung_pinctrl_of_match_data exynosautov9_of_data;
+ extern const struct samsung_pinctrl_of_match_data fsd_of_data;
+ extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
+-extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
+-extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
+-extern const struct samsung_pinctrl_of_match_data s3c2440_of_data;
+-extern const struct samsung_pinctrl_of_match_data s3c2450_of_data;
+ extern const struct samsung_pinctrl_of_match_data s5pv210_of_data;
+ 
+ #endif /* __PINCTRL_SAMSUNG_H */
+diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
+index 99659dc8f5a6df..56ab502e1a80b5 100644
+--- a/drivers/power/supply/max77976_charger.c
++++ b/drivers/power/supply/max77976_charger.c
+@@ -292,10 +292,10 @@ static int max77976_get_property(struct power_supply *psy,
+ 	case POWER_SUPPLY_PROP_ONLINE:
+ 		err = max77976_get_online(chg, &val->intval);
+ 		break;
+-	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ 		val->intval = MAX77976_CHG_CC_MAX;
+ 		break;
+-	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ 		err = max77976_get_integer(chg, CHG_CC,
+ 					   MAX77976_CHG_CC_MIN,
+ 					   MAX77976_CHG_CC_MAX,
+@@ -330,7 +330,7 @@ static int max77976_set_property(struct power_supply *psy,
+ 	int err = 0;
+ 
+ 	switch (psp) {
+-	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ 		err = max77976_set_integer(chg, CHG_CC,
+ 					   MAX77976_CHG_CC_MIN,
+ 					   MAX77976_CHG_CC_MAX,
+@@ -355,7 +355,7 @@ static int max77976_property_is_writeable(struct power_supply *psy,
+ 					  enum power_supply_property psp)
+ {
+ 	switch (psp) {
+-	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ 		return true;
+ 	default:
+@@ -368,8 +368,8 @@ static enum power_supply_property max77976_psy_props[] = {
+ 	POWER_SUPPLY_PROP_CHARGE_TYPE,
+ 	POWER_SUPPLY_PROP_HEALTH,
+ 	POWER_SUPPLY_PROP_ONLINE,
+-	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+-	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
++	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
++	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ 	POWER_SUPPLY_PROP_MODEL_NAME,
+ 	POWER_SUPPLY_PROP_MANUFACTURER,
+diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
+index 0971c666afd134..14729a6ae09a98 100644
+--- a/drivers/pwm/pwm-berlin.c
++++ b/drivers/pwm/pwm-berlin.c
+@@ -273,7 +273,7 @@ static int berlin_pwm_suspend(struct device *dev)
+ 		if (!channel)
+ 			continue;
+ 
+-		channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
++		channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_EN);
+ 		channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL);
+ 		channel->duty = berlin_pwm_readl(bpc, i, BERLIN_PWM_DUTY);
+ 		channel->tcnt = berlin_pwm_readl(bpc, i, BERLIN_PWM_TCNT);
+@@ -304,7 +304,7 @@ static int berlin_pwm_resume(struct device *dev)
+ 		berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
+ 		berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY);
+ 		berlin_pwm_writel(bpc, i, channel->tcnt, BERLIN_PWM_TCNT);
+-		berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_ENABLE);
++		berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_EN);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 4a7c41a6c21e7d..93baffe110c00a 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -443,6 +443,29 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 	else
+ 		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+ 
++	/*
++	 * Check for potential race described above. If we are waiting for the
++	 * next second and the second just ticked since the check above, either
++	 *
++	 * 1) It ticked after the alarm was set, and an alarm irq should be
++	 *    generated.
++	 *
++	 * 2) It ticked before the alarm was set, and the alarm irq most likely
++	 *    will not be generated.
++	 *
++	 * While we cannot easily check for which of these two scenarios we
++	 * are in, we can return -ETIME to signal that the timer has already
++	 * expired, which is true in both cases.
++	 */
++	if ((scheduled - now) <= 1) {
++		err = __rtc_read_time(rtc, &tm);
++		if (err)
++			return err;
++		now = rtc_tm_to_time64(&tm);
++		if (scheduled <= now)
++			return -ETIME;
++	}
++
+ 	trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
+ 	return err;
+ }
+@@ -594,6 +617,10 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
+ 		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+ 		rtc->uie_rtctimer.period = ktime_set(1, 0);
+ 		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
++		if (!err && rtc->ops && rtc->ops->alarm_irq_enable)
++			err = rtc->ops->alarm_irq_enable(rtc->dev.parent, 1);
++		if (err)
++			goto out;
+ 	} else {
+ 		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
+ 	}
+diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c
+index 9f8b5d4a8f6b65..6b77c122fdc109 100644
+--- a/drivers/rtc/rtc-optee.c
++++ b/drivers/rtc/rtc-optee.c
+@@ -320,6 +320,7 @@ static int optee_rtc_remove(struct device *dev)
+ {
+ 	struct optee_rtc *priv = dev_get_drvdata(dev);
+ 
++	tee_shm_free(priv->shm);
+ 	tee_client_close_session(priv->ctx, priv->session_id);
+ 	tee_client_close_context(priv->ctx);
+ 
+diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
+index 807f953ae0aedc..b7a7ea036f7adc 100644
+--- a/drivers/rtc/rtc-x1205.c
++++ b/drivers/rtc/rtc-x1205.c
+@@ -669,7 +669,7 @@ static const struct i2c_device_id x1205_id[] = {
+ MODULE_DEVICE_TABLE(i2c, x1205_id);
+ 
+ static const struct of_device_id x1205_dt_ids[] = {
+-	{ .compatible = "xircom,x1205", },
++	{ .compatible = "xicor,x1205", },
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, x1205_dt_ids);
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 49c57a9c110b5f..42a89ab33e1c78 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -6528,18 +6528,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
+ 	while (left) {
+ 		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
+ 		buff_size[sg_used] = sz;
+-		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+-		if (buff[sg_used] == NULL) {
+-			status = -ENOMEM;
+-			goto cleanup1;
+-		}
++
+ 		if (ioc->Request.Type.Direction & XFER_WRITE) {
+-			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+-				status = -EFAULT;
++			buff[sg_used] = memdup_user(data_ptr, sz);
++			if (IS_ERR(buff[sg_used])) {
++				status = PTR_ERR(buff[sg_used]);
+ 				goto cleanup1;
+ 			}
+-		} else
+-			memset(buff[sg_used], 0, sz);
++		} else {
++			buff[sg_used] = kzalloc(sz, GFP_KERNEL);
++			if (!buff[sg_used]) {
++				status = -ENOMEM;
++				goto cleanup1;
++			}
++		}
++
+ 		left -= sz;
+ 		data_ptr += sz;
+ 		sg_used++;
+diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
+index 43ebb331e21673..47c4434dc7e796 100644
+--- a/drivers/scsi/mvsas/mv_init.c
++++ b/drivers/scsi/mvsas/mv_init.c
+@@ -139,7 +139,7 @@ static void mvs_free(struct mvs_info *mvi)
+ 	if (mvi->shost)
+ 		scsi_host_put(mvi->shost);
+ 	list_for_each_entry(mwq, &mvi->wq_list, entry)
+-		cancel_delayed_work(&mwq->work_q);
++		cancel_delayed_work_sync(&mwq->work_q);
+ 	kfree(mvi->rsvd_tags);
+ 	kfree(mvi);
+ }
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index bf9b816637d02e..7b809644436eb7 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -687,6 +687,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
+ 	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ 	reg |= (op->addr.nbytes - 1);
+ 	writel(reg, reg_base + CQSPI_REG_SIZE);
++	readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
+ 	return 0;
+ }
+ 
+@@ -726,6 +727,7 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
+ 	reinit_completion(&cqspi->transfer_complete);
+ 	writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ 	       reg_base + CQSPI_REG_INDIRECTRD);
++	readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */
+ 
+ 	while (remaining > 0) {
+ 		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+@@ -1004,6 +1006,7 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
+ 	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ 	reg |= (op->addr.nbytes - 1);
+ 	writel(reg, reg_base + CQSPI_REG_SIZE);
++	readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
+ 	return 0;
+ }
+ 
+@@ -1029,6 +1032,8 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
+ 	reinit_completion(&cqspi->transfer_complete);
+ 	writel(CQSPI_REG_INDIRECTWR_START_MASK,
+ 	       reg_base + CQSPI_REG_INDIRECTWR);
++	readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */
++
+ 	/*
+ 	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+ 	 * Controller programming sequence, couple of cycles of
+diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c
+index 4d1634c492ec4d..594b60424d1c64 100644
+--- a/drivers/video/fbdev/core/fb_cmdline.c
++++ b/drivers/video/fbdev/core/fb_cmdline.c
+@@ -40,7 +40,7 @@ int fb_get_options(const char *name, char **option)
+ 	bool enabled;
+ 
+ 	if (name)
+-		is_of = strncmp(name, "offb", 4);
++		is_of = !strncmp(name, "offb", 4);
+ 
+ 	enabled = __video_get_options(name, &options, is_of);
+ 
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 9e3b5d21d09877..3a8f2e659e008d 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1326,14 +1326,17 @@ int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
+ }
+ EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
+ 
+-static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
++static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn,
++		     bool percpu)
+ {
+ 	struct evtchn_status status;
+ 	evtchn_port_t port;
+-	int rc = -ENOENT;
++	bool exists = false;
+ 
+ 	memset(&status, 0, sizeof(status));
+ 	for (port = 0; port < xen_evtchn_max_channels(); port++) {
++		int rc;
++
+ 		status.dom = DOMID_SELF;
+ 		status.port = port;
+ 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
+@@ -1341,12 +1344,16 @@ static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
+ 			continue;
+ 		if (status.status != EVTCHNSTAT_virq)
+ 			continue;
+-		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
++		if (status.u.virq != virq)
++			continue;
++		if (status.vcpu == xen_vcpu_nr(cpu)) {
+ 			*evtchn = port;
+-			break;
++			return 0;
++		} else if (!percpu) {
++			exists = true;
+ 		}
+ 	}
+-	return rc;
++	return exists ? -EEXIST : -ENOENT;
+ }
+ 
+ /**
+@@ -1393,8 +1400,11 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ 			evtchn = bind_virq.port;
+ 		else {
+ 			if (ret == -EEXIST)
+-				ret = find_virq(virq, cpu, &evtchn);
+-			BUG_ON(ret < 0);
++				ret = find_virq(virq, cpu, &evtchn, percpu);
++			if (ret) {
++				__unbind_from_irq(info, info->irq);
++				goto out;
++			}
+ 		}
+ 
+ 		ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
+@@ -1799,9 +1809,20 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
+ 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
+ 	 * it, but don't do the xenlinux-level rebind in that case.
+ 	 */
+-	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
++	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) {
++		int old_cpu = info->cpu;
++
+ 		bind_evtchn_to_cpu(info, tcpu, false);
+ 
++		if (info->type == IRQT_VIRQ) {
++			int virq = info->u.virq;
++			int irq = per_cpu(virq_to_irq, old_cpu)[virq];
++
++			per_cpu(virq_to_irq, old_cpu)[virq] = -1;
++			per_cpu(virq_to_irq, tcpu)[virq] = irq;
++		}
++	}
++
+ 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	return 0;
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index c16df629907e13..55537b673990de 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -116,7 +116,7 @@ static void do_suspend(void)
+ 	err = dpm_suspend_start(PMSG_FREEZE);
+ 	if (err) {
+ 		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
+-		goto out_thaw;
++		goto out_resume_end;
+ 	}
+ 
+ 	printk(KERN_DEBUG "suspending xenstore...\n");
+@@ -156,6 +156,7 @@ static void do_suspend(void)
+ 	else
+ 		xs_suspend_cancel();
+ 
++out_resume_end:
+ 	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
+ 
+ out_thaw:
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 203e5964c9b0fc..5e46f8cd7ea073 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -24,7 +24,11 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ 	int type;
+ 
+ 	if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
+-		*max_len = BTRFS_FID_SIZE_CONNECTABLE;
++		if (btrfs_root_id(BTRFS_I(inode)->root) !=
++		    btrfs_root_id(BTRFS_I(parent)->root))
++			*max_len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
++		else
++			*max_len = BTRFS_FID_SIZE_CONNECTABLE;
+ 		return FILEID_INVALID;
+ 	} else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
+ 		*max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
+@@ -46,6 +50,8 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ 		parent_root_id = BTRFS_I(parent)->root->root_key.objectid;
+ 
+ 		if (parent_root_id != fid->root_objectid) {
++			if (*max_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
++				return FILEID_INVALID;
+ 			fid->parent_root_objectid = parent_root_id;
+ 			len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
+ 			type = FILEID_BTRFS_WITH_PARENT_ROOT;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index ccf94c5fbfdfd6..88ba277bc3a79e 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -364,6 +364,13 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
+ 	/* step one, find a bunch of delalloc bytes starting at start */
+ 	delalloc_start = *start;
+ 	delalloc_end = 0;
++
++	/*
++	 * If @max_bytes is smaller than a block, btrfs_find_delalloc_range() can
++	 * return early without handling any dirty ranges.
++	 */
++	ASSERT(max_bytes >= fs_info->sectorsize);
++
+ 	found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
+ 					  max_bytes, &cached_state);
+ 	if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
+@@ -394,13 +401,14 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
+ 				  delalloc_start, delalloc_end);
+ 	ASSERT(!ret || ret == -EAGAIN);
+ 	if (ret == -EAGAIN) {
+-		/* some of the pages are gone, lets avoid looping by
+-		 * shortening the size of the delalloc range we're searching
++		/*
++		 * Some of the pages are gone, lets avoid looping by
++		 * shortening the size of the delalloc range we're searching.
+ 		 */
+ 		free_extent_state(cached_state);
+ 		cached_state = NULL;
+ 		if (!loops) {
+-			max_bytes = PAGE_SIZE;
++			max_bytes = fs_info->sectorsize;
+ 			loops = 1;
+ 			goto again;
+ 		} else {
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index 2fbf97077ce910..3f06362985b5a6 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -117,9 +117,18 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
+ 		inode_nohighmem(inode);
+ 		inode->i_data.a_ops = &cramfs_aops;
+ 		break;
+-	default:
++	case S_IFCHR:
++	case S_IFBLK:
++	case S_IFIFO:
++	case S_IFSOCK:
+ 		init_special_inode(inode, cramfs_inode->mode,
+ 				old_decode_dev(cramfs_inode->size));
++		break;
++	default:
++		printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n",
++		       cramfs_inode->mode, inode->i_ino);
++		iget_failed(inode);
++		return ERR_PTR(-EIO);
+ 	}
+ 
+ 	inode->i_mode = cramfs_inode->mode;
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index 1b68586f73f3fe..c970f41c50483e 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -74,7 +74,8 @@ static int ext4_getfsmap_dev_compare(const void *p1, const void *p2)
+ static bool ext4_getfsmap_rec_before_low_key(struct ext4_getfsmap_info *info,
+ 					     struct ext4_fsmap *rec)
+ {
+-	return rec->fmr_physical < info->gfi_low.fmr_physical;
++	return rec->fmr_physical + rec->fmr_length <=
++	       info->gfi_low.fmr_physical;
+ }
+ 
+ /*
+@@ -200,15 +201,18 @@ static int ext4_getfsmap_meta_helper(struct super_block *sb,
+ 			  ext4_group_first_block_no(sb, agno));
+ 	fs_end = fs_start + EXT4_C2B(sbi, len);
+ 
+-	/* Return relevant extents from the meta_list */
++	/*
++	 * Return relevant extents from the meta_list. We emit all extents that
++	 * partially/fully overlap with the query range
++	 */
+ 	list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) {
+-		if (p->fmr_physical < info->gfi_next_fsblk) {
++		if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) {
+ 			list_del(&p->fmr_list);
+ 			kfree(p);
+ 			continue;
+ 		}
+-		if (p->fmr_physical <= fs_start ||
+-		    p->fmr_physical + p->fmr_length <= fs_end) {
++		if (p->fmr_physical <= fs_end &&
++		    p->fmr_physical + p->fmr_length > fs_start) {
+ 			/* Emit the retained free extent record if present */
+ 			if (info->gfi_lastfree.fmr_owner) {
+ 				error = ext4_getfsmap_helper(sb, info,
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 63b1384823ba56..91a9fa6f1ad4f0 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3871,7 +3871,11 @@ int ext4_can_truncate(struct inode *inode)
+  * We have to make sure i_disksize gets properly updated before we truncate
+  * page cache due to hole punching or zero range. Otherwise i_disksize update
+  * can get lost as it may have been postponed to submission of writeback but
+- * that will never happen after we truncate page cache.
++ * that will never happen if we remove the folio containing i_size from the
++ * page cache. Also if we punch hole within i_size but above i_disksize,
++ * following ext4_page_mkwrite() may mistakenly allocate written blocks over
++ * the hole and thus introduce allocated blocks beyond i_disksize which is
++ * not allowed (e2fsck would complain in case of crash).
+  */
+ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
+ 				      loff_t len)
+@@ -3882,9 +3886,11 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
+ 	loff_t size = i_size_read(inode);
+ 
+ 	WARN_ON(!inode_is_locked(inode));
+-	if (offset > size || offset + len < size)
++	if (offset > size)
+ 		return 0;
+ 
++	if (offset + len < size)
++		size = offset + len;
+ 	if (EXT4_I(inode)->i_disksize >= size)
+ 		return 0;
+ 
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 5e6b07b3496006..c566161127cd7a 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -225,7 +225,7 @@ static int mext_page_mkuptodate(struct folio *folio, size_t from, size_t to)
+ 	do {
+ 		if (bh_offset(bh) + blocksize <= from)
+ 			continue;
+-		if (bh_offset(bh) > to)
++		if (bh_offset(bh) >= to)
+ 			break;
+ 		wait_on_buffer(bh);
+ 		if (buffer_uptodate(bh))
+diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
+index c53918768cb256..05997b4d012038 100644
+--- a/fs/ext4/orphan.c
++++ b/fs/ext4/orphan.c
+@@ -513,7 +513,7 @@ void ext4_release_orphan_info(struct super_block *sb)
+ 		return;
+ 	for (i = 0; i < oi->of_blocks; i++)
+ 		brelse(oi->of_binfo[i].ob_bh);
+-	kfree(oi->of_binfo);
++	kvfree(oi->of_binfo);
+ }
+ 
+ static struct ext4_orphan_block_tail *ext4_orphan_block_tail(
+@@ -584,9 +584,20 @@ int ext4_init_orphan_info(struct super_block *sb)
+ 		ext4_msg(sb, KERN_ERR, "get orphan inode failed");
+ 		return PTR_ERR(inode);
+ 	}
++	/*
++	 * This is just an artificial limit to prevent corrupted fs from
++	 * consuming absurd amounts of memory when pinning blocks of orphan
++	 * file in memory.
++	 */
++	if (inode->i_size > 8 << 20) {
++		ext4_msg(sb, KERN_ERR, "orphan file too big: %llu",
++			 (unsigned long long)inode->i_size);
++		ret = -EFSCORRUPTED;
++		goto out_put;
++	}
+ 	oi->of_blocks = inode->i_size >> sb->s_blocksize_bits;
+ 	oi->of_csum_seed = EXT4_I(inode)->i_csum_seed;
+-	oi->of_binfo = kmalloc_array(oi->of_blocks,
++	oi->of_binfo = kvmalloc_array(oi->of_blocks,
+ 				     sizeof(struct ext4_orphan_block),
+ 				     GFP_KERNEL);
+ 	if (!oi->of_binfo) {
+@@ -627,7 +638,7 @@ int ext4_init_orphan_info(struct super_block *sb)
+ out_free:
+ 	for (i--; i >= 0; i--)
+ 		brelse(oi->of_binfo[i].ob_bh);
+-	kfree(oi->of_binfo);
++	kvfree(oi->of_binfo);
+ out_put:
+ 	iput(inode);
+ 	return ret;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index ca22aa9e04b4b1..66933e55efb3bd 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -251,6 +251,10 @@ check_xattrs(struct inode *inode, struct buffer_head *bh,
+ 			err_str = "invalid ea_ino";
+ 			goto errout;
+ 		}
++		if (ea_ino && !size) {
++			err_str = "invalid size in ea xattr";
++			goto errout;
++		}
+ 		if (size > EXT4_XATTR_SIZE_MAX) {
+ 			err_str = "e_value size too large";
+ 			goto errout;
+@@ -1036,7 +1040,7 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+ 				       int ref_change)
+ {
+ 	struct ext4_iloc iloc;
+-	s64 ref_count;
++	u64 ref_count;
+ 	int ret;
+ 
+ 	inode_lock_nested(ea_inode, I_MUTEX_XATTR);
+@@ -1046,13 +1050,17 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+ 		goto out;
+ 
+ 	ref_count = ext4_xattr_inode_get_ref(ea_inode);
++	if ((ref_count == 0 && ref_change < 0) || (ref_count == U64_MAX && ref_change > 0)) {
++		ext4_error_inode(ea_inode, __func__, __LINE__, 0,
++			"EA inode %lu ref wraparound: ref_count=%lld ref_change=%d",
++			ea_inode->i_ino, ref_count, ref_change);
++		ret = -EFSCORRUPTED;
++		goto out;
++	}
+ 	ref_count += ref_change;
+ 	ext4_xattr_inode_set_ref(ea_inode, ref_count);
+ 
+ 	if (ref_change > 0) {
+-		WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
+-			  ea_inode->i_ino, ref_count);
+-
+ 		if (ref_count == 1) {
+ 			WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
+ 				  ea_inode->i_ino, ea_inode->i_nlink);
+@@ -1061,9 +1069,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+ 			ext4_orphan_del(handle, ea_inode);
+ 		}
+ 	} else {
+-		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
+-			  ea_inode->i_ino, ref_count);
+-
+ 		if (ref_count == 0) {
+ 			WARN_ONCE(ea_inode->i_nlink != 1,
+ 				  "EA inode %lu i_nlink=%u",
+diff --git a/fs/file.c b/fs/file.c
+index 0ce6a6930276d3..9b76b9644ac275 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -1138,7 +1138,10 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
+ 	err = expand_files(files, fd);
+ 	if (unlikely(err < 0))
+ 		goto out_unlock;
+-	return do_dup2(files, file, fd, flags);
++	err = do_dup2(files, file, fd, flags);
++	if (err < 0)
++		return err;
++	return 0;
+ 
+ out_unlock:
+ 	spin_unlock(&files->file_lock);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index ed110568d6127f..274fae88b498e4 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -422,22 +422,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
+ 	 * Transfer to @new_wb's IO list if necessary.  If the @inode is dirty,
+ 	 * the specific list @inode was on is ignored and the @inode is put on
+ 	 * ->b_dirty which is always correct including from ->b_dirty_time.
+-	 * The transfer preserves @inode->dirtied_when ordering.  If the @inode
+-	 * was clean, it means it was on the b_attached list, so move it onto
+-	 * the b_attached list of @new_wb.
++	 * If the @inode was clean, it means it was on the b_attached list, so
++	 * move it onto the b_attached list of @new_wb.
+ 	 */
+ 	if (!list_empty(&inode->i_io_list)) {
+ 		inode->i_wb = new_wb;
+ 
+ 		if (inode->i_state & I_DIRTY_ALL) {
+-			struct inode *pos;
+-
+-			list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
+-				if (time_after_eq(inode->dirtied_when,
+-						  pos->dirtied_when))
+-					break;
++			/*
++			 * We need to keep b_dirty list sorted by
++			 * dirtied_time_when. However properly sorting the
++			 * inode in the list gets too expensive when switching
++			 * many inodes. So just attach inode at the end of the
++			 * dirty list and clobber the dirtied_time_when.
++			 */
++			inode->dirtied_time_when = jiffies;
+ 			inode_io_list_move_locked(inode, new_wb,
+-						  pos->i_io_list.prev);
++						  &new_wb->b_dirty);
+ 		} else {
+ 			inode_cgwb_move_to_attached(inode, new_wb);
+ 		}
+@@ -479,6 +480,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ 	 */
+ 	down_read(&bdi->wb_switch_rwsem);
+ 
++	inodep = isw->inodes;
+ 	/*
+ 	 * By the time control reaches here, RCU grace period has passed
+ 	 * since I_WB_SWITCH assertion and all wb stat update transactions
+@@ -489,6 +491,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ 	 * gives us exclusion against all wb related operations on @inode
+ 	 * including IO list manipulations and stat updates.
+ 	 */
++relock:
+ 	if (old_wb < new_wb) {
+ 		spin_lock(&old_wb->list_lock);
+ 		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
+@@ -497,10 +500,17 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ 		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
+ 	}
+ 
+-	for (inodep = isw->inodes; *inodep; inodep++) {
++	while (*inodep) {
+ 		WARN_ON_ONCE((*inodep)->i_wb != old_wb);
+ 		if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
+ 			nr_switched++;
++		inodep++;
++		if (*inodep && need_resched()) {
++			spin_unlock(&new_wb->list_lock);
++			spin_unlock(&old_wb->list_lock);
++			cond_resched();
++			goto relock;
++		}
+ 	}
+ 
+ 	spin_unlock(&new_wb->list_lock);
+diff --git a/fs/fsopen.c b/fs/fsopen.c
+index ce03f6521c8870..3576b1084ec2fb 100644
+--- a/fs/fsopen.c
++++ b/fs/fsopen.c
+@@ -18,50 +18,56 @@
+ #include "internal.h"
+ #include "mount.h"
+ 
++static inline const char *fetch_message_locked(struct fc_log *log, size_t len,
++					       bool *need_free)
++{
++	const char *p;
++	int index;
++
++	if (unlikely(log->head == log->tail))
++		return ERR_PTR(-ENODATA);
++
++	index = log->tail & (ARRAY_SIZE(log->buffer) - 1);
++	p = log->buffer[index];
++	if (unlikely(strlen(p) > len))
++		return ERR_PTR(-EMSGSIZE);
++
++	log->buffer[index] = NULL;
++	*need_free = log->need_free & (1 << index);
++	log->need_free &= ~(1 << index);
++	log->tail++;
++
++	return p;
++}
++
+ /*
+  * Allow the user to read back any error, warning or informational messages.
++ * Only one message is returned for each read(2) call.
+  */
+ static ssize_t fscontext_read(struct file *file,
+ 			      char __user *_buf, size_t len, loff_t *pos)
+ {
+ 	struct fs_context *fc = file->private_data;
+-	struct fc_log *log = fc->log.log;
+-	unsigned int logsize = ARRAY_SIZE(log->buffer);
+-	ssize_t ret;
+-	char *p;
++	ssize_t err;
++	const char *p __free(kfree) = NULL, *message;
+ 	bool need_free;
+-	int index, n;
++	int n;
+ 
+-	ret = mutex_lock_interruptible(&fc->uapi_mutex);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (log->head == log->tail) {
+-		mutex_unlock(&fc->uapi_mutex);
+-		return -ENODATA;
+-	}
+-
+-	index = log->tail & (logsize - 1);
+-	p = log->buffer[index];
+-	need_free = log->need_free & (1 << index);
+-	log->buffer[index] = NULL;
+-	log->need_free &= ~(1 << index);
+-	log->tail++;
++	err = mutex_lock_interruptible(&fc->uapi_mutex);
++	if (err < 0)
++		return err;
++	message = fetch_message_locked(fc->log.log, len, &need_free);
+ 	mutex_unlock(&fc->uapi_mutex);
++	if (IS_ERR(message))
++		return PTR_ERR(message);
+ 
+-	ret = -EMSGSIZE;
+-	n = strlen(p);
+-	if (n > len)
+-		goto err_free;
+-	ret = -EFAULT;
+-	if (copy_to_user(_buf, p, n) != 0)
+-		goto err_free;
+-	ret = n;
+-
+-err_free:
+ 	if (need_free)
+-		kfree(p);
+-	return ret;
++		p = message;
++
++	n = strlen(message);
++	if (copy_to_user(_buf, message, n))
++		return -EFAULT;
++	return n;
+ }
+ 
+ static int fscontext_release(struct inode *inode, struct file *file)
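
fetch_message_locked() above uses the classic power-of-two ring: head and tail are free-running counters, counter & (ARRAY_SIZE - 1) maps them to a slot, and head == tail means the log is empty. A self-contained model of that discipline (a sketch; the real fc_log additionally tracks per-slot ownership via need_free):

    #include <stdio.h>

    #define LOG_SLOTS 8 /* must be a power of two */

    struct msg_log {
        const char *buffer[LOG_SLOTS];
        unsigned int head, tail; /* free-running counters */
    };

    static int log_push(struct msg_log *log, const char *msg)
    {
        if (log->head - log->tail == LOG_SLOTS)
            return -1; /* full */
        log->buffer[log->head & (LOG_SLOTS - 1)] = msg;
        log->head++;
        return 0;
    }

    static const char *log_pop(struct msg_log *log)
    {
        const char *msg;

        if (log->head == log->tail)
            return NULL; /* empty, like the -ENODATA case above */
        msg = log->buffer[log->tail & (LOG_SLOTS - 1)];
        log->tail++;
        return msg;
    }

    int main(void)
    {
        struct msg_log log = { { 0 }, 0, 0 };
        const char *m;

        log_push(&log, "e first error");
        log_push(&log, "w then a warning");
        while ((m = log_pop(&log)))
            printf("%s\n", m); /* one message per read(2)-like call */
        return 0;
    }
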
+diff --git a/fs/minix/inode.c b/fs/minix/inode.c
+index df575473c1cc0b..ee8a6fe360e728 100644
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -470,8 +470,14 @@ void minix_set_inode(struct inode *inode, dev_t rdev)
+ 		inode->i_op = &minix_symlink_inode_operations;
+ 		inode_nohighmem(inode);
+ 		inode->i_mapping->a_ops = &minix_aops;
+-	} else
++	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
++		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ 		init_special_inode(inode, inode->i_mode, rdev);
++	} else {
++		printk(KERN_DEBUG "MINIX-fs: Invalid file type 0%04o for inode %lu.\n",
++		       inode->i_mode, inode->i_ino);
++		make_bad_inode(inode);
++	}
+ }
+ 
+ /*
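
The minix change refuses to initialise an inode whose on-disk mode carries an unknown file type, marking it bad instead. The S_IS*() predicates test the S_IFMT bits of the mode, the same encoding user space sees, so the whitelist can be illustrated with an ordinary program (a sketch, not the kernel helper):

    #include <stdio.h>
    #include <sys/stat.h>

    /* Returns 1 when the mode carries a defined file type, mirroring the
     * whitelist the patch applies before init_special_inode(). */
    static int mode_is_valid_type(mode_t mode)
    {
        return S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode) ||
               S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
               S_ISSOCK(mode);
    }

    int main(void)
    {
        mode_t good = S_IFCHR | 0644;
        mode_t bad  = 0170000 | 0644; /* all S_IFMT bits: no such type */

        printf("good: %d\n", mode_is_valid_type(good)); /* 1 */
        printf("bad:  %d\n", mode_is_valid_type(bad));  /* 0 */
        return 0;
    }
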
+diff --git a/fs/namei.c b/fs/namei.c
+index 155e4d09a5fb2c..02b0c273129824 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1364,6 +1364,10 @@ static int follow_automount(struct path *path, int *count, unsigned lookup_flags
+ 	    dentry->d_inode)
+ 		return -EISDIR;
+ 
++	/* No need to trigger automounts if mountpoint crossing is disabled. */
++	if (lookup_flags & LOOKUP_NO_XDEV)
++		return -EXDEV;
++
+ 	if (count && (*count)++ >= MAXSYMLINKS)
+ 		return -ELOOP;
+ 
+@@ -1387,6 +1391,10 @@ static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
+ 		/* Allow the filesystem to manage the transit without i_mutex
+ 		 * being held. */
+ 		if (flags & DCACHE_MANAGE_TRANSIT) {
++			if (lookup_flags & LOOKUP_NO_XDEV) {
++				ret = -EXDEV;
++				break;
++			}
+ 			ret = path->dentry->d_op->d_manage(path, false);
+ 			flags = smp_load_acquire(&path->dentry->d_flags);
+ 			if (ret < 0)
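
LOOKUP_NO_XDEV is the in-kernel flag behind openat2(2)'s RESOLVE_NO_XDEV; the two hunks above extend it to automount points and managed mount transitions, which now fail with -EXDEV instead of silently crossing. A hedged user-space demonstration (assumes Linux 5.6+ and that /proc is a separate mount; glibc has no wrapper, hence the raw syscall):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/openat2.h>

    int main(void)
    {
        struct open_how how;
        int fd;

        memset(&how, 0, sizeof(how));
        how.flags = O_RDONLY;
        how.resolve = RESOLVE_NO_XDEV; /* sets LOOKUP_NO_XDEV in-kernel */

        /* /proc is almost always a separate mount, so resolving into it
         * should fail with EXDEV under RESOLVE_NO_XDEV. */
        fd = syscall(SYS_openat2, AT_FDCWD, "/proc/self/status",
                     &how, sizeof(how));
        if (fd < 0)
            printf("openat2: %s (EXDEV expected here)\n", strerror(errno));
        else
            close(fd);
        return 0;
    }
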
+diff --git a/fs/namespace.c b/fs/namespace.c
+index f79226472251ba..646d9e7d41ee8a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -64,6 +64,15 @@ static int __init set_mphash_entries(char *str)
+ }
+ __setup("mphash_entries=", set_mphash_entries);
+ 
++static char * __initdata initramfs_options;
++static int __init initramfs_options_setup(char *str)
++{
++	initramfs_options = str;
++	return 1;
++}
++
++__setup("initramfs_options=", initramfs_options_setup);
++
+ static u64 event;
+ static DEFINE_IDA(mnt_id_ida);
+ static DEFINE_IDA(mnt_group_ida);
+@@ -4728,7 +4737,7 @@ static void __init init_mount_tree(void)
+ 	struct mnt_namespace *ns;
+ 	struct path root;
+ 
+-	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
++	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", initramfs_options);
+ 	if (IS_ERR(mnt))
+ 		panic("Can't create rootfs");
+ 
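
The new boot parameter is recorded by the __setup() hook and later handed verbatim to vfs_kern_mount() as the rootfs mount option string, so something like initramfs_options=size=50% would bound a tmpfs-backed initramfs (the accepted options are whatever rootfs understands). The prefix matching that __setup() arranges boils down to this (illustrative sketch):

    #include <stdio.h>
    #include <string.h>

    static char *initramfs_options;

    /* Roughly what the __setup("initramfs_options=", ...) hook does:
     * match the prefix and keep a pointer to the value. */
    static void parse_param(char *param)
    {
        const char *prefix = "initramfs_options=";

        if (strncmp(param, prefix, strlen(prefix)) == 0)
            initramfs_options = param + strlen(prefix);
    }

    int main(void)
    {
        char cmdline_param[] = "initramfs_options=size=50%";

        parse_param(cmdline_param);
        printf("rootfs mount options: %s\n",
               initramfs_options ? initramfs_options : "(none)");
        return 0;
    }
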
+diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
+index 46a7f9b813e527..b02886f38925f8 100644
+--- a/fs/nfsd/lockd.c
++++ b/fs/nfsd/lockd.c
+@@ -48,6 +48,21 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp,
+ 	switch (nfserr) {
+ 	case nfs_ok:
+ 		return 0;
++	case nfserr_jukebox:
++		/* This error can indicate the presence of a conflicting
++		 * delegation for an NLM lock request. Options are:
++		 * (1) For now, drop this request and make the client
++		 * retry. When the delegation is returned, the client's
++		 * lock retry will complete.
++		 * (2) NLM4_DENIED as per the "spec" signals to the client
++		 * that the lock is unavailable now but that the client
++		 * can retry. The Linux client implementation does not:
++		 * it treats NLM4_DENIED the same as NLM4_FAILED and
++		 * fails the request.
++		 * (3) For the future, treat this as a blocked lock and try
++		 * to issue a callback when the delegation is returned,
++		 * though we might not have a proper lock request to
++		 * block on.
++		 */
++		fallthrough;
+ 	case nfserr_dropit:
+ 		return nlm_drop_reply;
+ 	case nfserr_stale:
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index cd78b7ecbd4325..e9c1271b7ecc39 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1368,7 +1368,7 @@ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
+ 		return 0;
+ 	}
+ 	if (work) {
+-		strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1);
++		strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
+ 		refcount_set(&work->nsui_refcnt, 2);
+ 		work->nsui_busy = true;
+ 		list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
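
The nfs4proc one-liner fixes a subtle strscpy() misuse: unlike strncpy(), strscpy() already reserves the final byte for the NUL terminator, so passing sizeof(dst) - 1 shrinks the usable buffer by one for no reason. A user-space model of the contract (the kernel version returns -E2BIG on truncation, mirrored here):

    #include <stdio.h>
    #include <string.h>

    #define E2BIG_ERR (-7)

    /* Simplified model of the kernel's strscpy(): copies at most size-1
     * bytes, always NUL-terminates, returns the length copied or
     * -E2BIG on truncation. */
    static long strscpy_model(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size == 0)
            return E2BIG_ERR;
        if (len >= size) {
            memcpy(dst, src, size - 1);
            dst[size - 1] = '\0';
            return E2BIG_ERR;
        }
        memcpy(dst, src, len + 1);
        return (long)len;
    }

    int main(void)
    {
        char ipaddr[16];

        /* Correct: pass the full destination size. */
        printf("%ld\n",
               strscpy_model(ipaddr, "192.168.100.200", sizeof(ipaddr)));
        /* The old sizeof(...) - 1 call truncates the same address. */
        printf("%ld [%s]\n",
               strscpy_model(ipaddr, "192.168.100.200", sizeof(ipaddr) - 1),
               ipaddr);
        return 0;
    }
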
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index cf4fe21a50399b..29b585f443f3eb 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -1399,6 +1399,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+ 		mark_buffer_dirty(bh);
+ 		unlock_buffer(bh);
+ 		/* err = sync_dirty_buffer(bh); */
++		put_bh(bh);
+ 
+ 		b0 = 0;
+ 		bits -= op;
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index e62d9cc592e0c8..c80b291a14a576 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -671,14 +671,72 @@ static int cifs_query_path_info(const unsigned int xid,
+ 	}
+ 
+ #ifdef CONFIG_CIFS_XATTR
++	/*
++	 * For non-symlink WSL reparse points it is required to fetch
++	 * EA $LXMOD, whose S_DT part contains the mandatory file type.
++	 */
++	if (!rc && data->reparse_point) {
++		struct smb2_file_full_ea_info *ea;
++		u32 next = 0;
++
++		ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
++		do {
++			ea = (void *)((u8 *)ea + next);
++			next = le32_to_cpu(ea->next_entry_offset);
++		} while (next);
++		if (le16_to_cpu(ea->ea_value_length)) {
++			ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) +
++						ea->ea_name_length + 1 +
++						le16_to_cpu(ea->ea_value_length), 4));
++			ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset));
++		}
++
++		rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_MODE,
++				    &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
++				    SMB2_WSL_XATTR_MODE_SIZE, cifs_sb);
++		if (rc == SMB2_WSL_XATTR_MODE_SIZE) {
++			ea->next_entry_offset = cpu_to_le32(0);
++			ea->flags = 0;
++			ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
++			ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_MODE_SIZE);
++			memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_MODE, SMB2_WSL_XATTR_NAME_LEN + 1);
++			data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
++						   SMB2_WSL_XATTR_MODE_SIZE, 4);
++			rc = 0;
++		} else if (rc >= 0) {
++			/* It is an error if EA $LXMOD has wrong size. */
++			rc = -EINVAL;
++		} else {
++			/*
++			 * In all other cases ignore the error if fetching
++			 * EA $LXMOD failed. It is needed only for
++			 * non-symlink WSL reparse points, and wsl_to_fattr()
++			 * handles the case when the EA is missing.
++			 */
++			rc = 0;
++		}
++	}
++
+ 	/*
+ 	 * For WSL CHR and BLK reparse points it is required to fetch
+ 	 * EA $LXDEV which contains major and minor device numbers.
+ 	 */
+ 	if (!rc && data->reparse_point) {
+ 		struct smb2_file_full_ea_info *ea;
++		u32 next = 0;
+ 
+ 		ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
++		do {
++			ea = (void *)((u8 *)ea + next);
++			next = le32_to_cpu(ea->next_entry_offset);
++		} while (next);
++		if (le16_to_cpu(ea->ea_value_length)) {
++			ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) +
++						ea->ea_name_length + 1 +
++						le16_to_cpu(ea->ea_value_length), 4));
++			ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset));
++		}
++
+ 		rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV,
+ 				    &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
+ 				    SMB2_WSL_XATTR_DEV_SIZE, cifs_sb);
+@@ -688,8 +746,8 @@ static int cifs_query_path_info(const unsigned int xid,
+ 			ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
+ 			ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE);
+ 			memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1);
+-			data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
+-					    SMB2_WSL_XATTR_DEV_SIZE;
++			data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
++						   SMB2_WSL_XATTR_MODE_SIZE, 4);
+ 			rc = 0;
+ 		} else if (rc >= 0) {
+ 			/* It is an error if EA $LXDEV has wrong size. */
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 79641d1ee86757..232a3c28905568 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -1216,31 +1216,33 @@ int
+ smb2_set_file_info(struct inode *inode, const char *full_path,
+ 		   FILE_BASIC_INFO *buf, const unsigned int xid)
+ {
+-	struct cifs_open_parms oparms;
++	struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), };
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++	struct cifsFileInfo *cfile = NULL;
++	struct cifs_open_parms oparms;
+ 	struct tcon_link *tlink;
+ 	struct cifs_tcon *tcon;
+-	struct cifsFileInfo *cfile;
+-	struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), };
+-	int rc;
+-
+-	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
+-	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
+-	    (buf->Attributes == 0))
+-		return 0; /* would be a no op, no sense sending this */
++	int rc = 0;
+ 
+ 	tlink = cifs_sb_tlink(cifs_sb);
+ 	if (IS_ERR(tlink))
+ 		return PTR_ERR(tlink);
+ 	tcon = tlink_tcon(tlink);
+ 
+-	cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++	if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
++	    (buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) {
++		if (buf->Attributes == 0)
++			goto out; /* would be a no op, no sense sending this */
++		cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
++	}
++
+ 	oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES,
+ 			     FILE_OPEN, 0, ACL_NO_MODE);
+ 	rc = smb2_compound_op(xid, tcon, cifs_sb,
+ 			      full_path, &oparms, &in_iov,
+ 			      &(int){SMB2_OP_SET_INFO}, 1,
+ 			      cfile, NULL, NULL, NULL);
++out:
+ 	cifs_put_tlink(tlink);
+ 	return rc;
+ }
+diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
+index f4e55199938d58..c6c1844d444822 100644
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -108,10 +108,11 @@ struct ksmbd_startup_request {
+ 	__u32	smb2_max_credits;	/* MAX credits */
+ 	__u32	smbd_max_io_size;	/* smbd read write size */
+ 	__u32	max_connections;	/* Number of maximum simultaneous connections */
+-	__u32	reserved[126];		/* Reserved room */
++	__u32	max_ip_connections;	/* Number of maximum connection per ip address */
++	__u32	reserved[125];		/* Reserved room */
+ 	__u32	ifc_list_sz;		/* interfaces list size */
+ 	__s8	____payload[];
+-};
++} __packed;
+ 
+ #define KSMBD_STARTUP_CONFIG_INTERFACES(s)	((s)->____payload)
+ 
+diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
+index 4d06f2eb0d6adb..d0744498ceed66 100644
+--- a/fs/smb/server/server.h
++++ b/fs/smb/server/server.h
+@@ -43,6 +43,7 @@ struct ksmbd_server_config {
+ 	unsigned int		auth_mechs;
+ 	unsigned int		max_connections;
+ 	unsigned int		max_inflight_req;
++	unsigned int		max_ip_connections;
+ 
+ 	char			*conf[SERVER_CONF_WORK_GROUP + 1];
+ };
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index 281101fd1f76f1..80581a7bc1bcc7 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -321,6 +321,9 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
+ 	if (req->max_connections)
+ 		server_conf.max_connections = req->max_connections;
+ 
++	if (req->max_ip_connections)
++		server_conf.max_ip_connections = req->max_ip_connections;
++
+ 	ret = ksmbd_set_netbios_name(req->netbios_name);
+ 	ret |= ksmbd_set_server_string(req->server_string);
+ 	ret |= ksmbd_set_work_group(req->work_group);
+diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
+index 53c536f2ce9f9c..c43a465114289b 100644
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -240,6 +240,7 @@ static int ksmbd_kthread_fn(void *p)
+ 	struct interface *iface = (struct interface *)p;
+ 	struct ksmbd_conn *conn;
+ 	int ret;
++	unsigned int max_ip_conns;
+ 
+ 	while (!kthread_should_stop()) {
+ 		mutex_lock(&iface->sock_release_lock);
+@@ -257,34 +258,39 @@ static int ksmbd_kthread_fn(void *p)
+ 			continue;
+ 		}
+ 
++		if (!server_conf.max_ip_connections)
++			goto skip_max_ip_conns_limit;
++
+ 		/*
+ 		 * Limits repeated connections from clients with the same IP.
+ 		 */
++		max_ip_conns = 0;
+ 		down_read(&conn_list_lock);
+-		list_for_each_entry(conn, &conn_list, conns_list)
++		list_for_each_entry(conn, &conn_list, conns_list) {
+ #if IS_ENABLED(CONFIG_IPV6)
+ 			if (client_sk->sk->sk_family == AF_INET6) {
+ 				if (memcmp(&client_sk->sk->sk_v6_daddr,
+-					   &conn->inet6_addr, 16) == 0) {
+-					ret = -EAGAIN;
+-					break;
+-				}
++					   &conn->inet6_addr, 16) == 0)
++					max_ip_conns++;
+ 			} else if (inet_sk(client_sk->sk)->inet_daddr ==
+-				 conn->inet_addr) {
+-				ret = -EAGAIN;
+-				break;
+-			}
++				 conn->inet_addr)
++				max_ip_conns++;
+ #else
+ 			if (inet_sk(client_sk->sk)->inet_daddr ==
+-			    conn->inet_addr) {
++			    conn->inet_addr)
++				max_ip_conns++;
++#endif
++			if (server_conf.max_ip_connections <= max_ip_conns) {
+ 				ret = -EAGAIN;
+ 				break;
+ 			}
+-#endif
++		}
+ 		up_read(&conn_list_lock);
+ 		if (ret == -EAGAIN)
+ 			continue;
+ 
++skip_max_ip_conns_limit:
++
+ 		if (server_conf.max_connections &&
+ 		    atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
+ 			pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
+diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
+index 53104f25de5116..f5dcb8353f862f 100644
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -140,8 +140,17 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		if (err < 0)
+ 			goto failed_read;
+ 
++		inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+ 		frag = le32_to_cpu(sqsh_ino->fragment);
+ 		if (frag != SQUASHFS_INVALID_FRAG) {
++			/*
++			 * a file whose size is a multiple of the block size
++			 * cannot also have a tail-end fragment
++			 */
++			if ((inode->i_size & (msblk->block_size - 1)) == 0) {
++				err = -EINVAL;
++				goto failed_read;
++			}
+ 			frag_offset = le32_to_cpu(sqsh_ino->offset);
+ 			frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+ 			if (frag_size < 0) {
+@@ -155,7 +164,6 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		}
+ 
+ 		set_nlink(inode, 1);
+-		inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+ 		inode->i_fop = &generic_ro_fops;
+ 		inode->i_mode |= S_IFREG;
+ 		inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+@@ -184,8 +192,21 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		if (err < 0)
+ 			goto failed_read;
+ 
++		inode->i_size = le64_to_cpu(sqsh_ino->file_size);
++		if (inode->i_size < 0) {
++			err = -EINVAL;
++			goto failed_read;
++		}
+ 		frag = le32_to_cpu(sqsh_ino->fragment);
+ 		if (frag != SQUASHFS_INVALID_FRAG) {
++			/*
++			 * a file whose size is a multiple of the block size
++			 * cannot also have a tail-end fragment
++			 */
++			if ((inode->i_size & (msblk->block_size - 1)) == 0) {
++				err = -EINVAL;
++				goto failed_read;
++			}
+ 			frag_offset = le32_to_cpu(sqsh_ino->offset);
+ 			frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+ 			if (frag_size < 0) {
+@@ -200,7 +221,6 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 
+ 		xattr_id = le32_to_cpu(sqsh_ino->xattr);
+ 		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+-		inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+ 		inode->i_op = &squashfs_inode_ops;
+ 		inode->i_fop = &generic_ro_fops;
+ 		inode->i_mode |= S_IFREG;
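
Both squashfs hunks hinge on the same arithmetic: block_size is a power of two, so i_size & (block_size - 1) is the size modulo the block size, and a zero result means the file ends exactly on a block boundary and can have no tail-end fragment. Spelled out:

    #include <stdio.h>

    int main(void)
    {
        unsigned long block_size = 131072; /* 128 KiB, a power of two */
        unsigned long sizes[] = { 131072, 200000, 262144 };

        for (int i = 0; i < 3; i++) {
            unsigned long tail = sizes[i] & (block_size - 1);

            /* tail == 0: exact multiple, so a fragment would be bogus */
            printf("size %lu -> tail-end %lu bytes%s\n", sizes[i], tail,
                   tail ? "" : " (fragment invalid)");
        }
        return 0;
    }
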
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 7e9e7e76904369..2b5891515f3d82 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -213,6 +213,12 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
+  */
+ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
+ 
++/*
++ * The ACPI Global Lock is mainly used on systems with SMM, so SMM-less
++ * systems (such as loong_arch) may neither have nor use the Global Lock.
++ */
++ACPI_INIT_GLOBAL(u8, acpi_gbl_use_global_lock, TRUE);
++
+ /*
+  * Maximum timeout for While() loop iterations before forced method abort.
+  * This mechanism is intended to prevent infinite loops during interpreter
+diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
+index bac63e874c7bf9..52db695436bb0b 100644
+--- a/include/asm-generic/io.h
++++ b/include/asm-generic/io.h
+@@ -74,6 +74,7 @@
+ #if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+ #include <linux/tracepoint-defs.h>
+ 
++#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
+ DECLARE_TRACEPOINT(rwmmio_write);
+ DECLARE_TRACEPOINT(rwmmio_post_write);
+ DECLARE_TRACEPOINT(rwmmio_read);
+@@ -90,6 +91,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ 
+ #else
+ 
++#define rwmmio_tracepoint_enabled(tracepoint) false
+ static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ 				  unsigned long caller_addr, unsigned long caller_addr0) {}
+ static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+@@ -188,11 +190,13 @@ static inline u8 readb(const volatile void __iomem *addr)
+ {
+ 	u8 val;
+ 
+-	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+ 	val = __raw_readb(addr);
+ 	__io_ar(val);
+-	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -203,11 +207,13 @@ static inline u16 readw(const volatile void __iomem *addr)
+ {
+ 	u16 val;
+ 
+-	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+ 	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ 	__io_ar(val);
+-	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -218,11 +224,13 @@ static inline u32 readl(const volatile void __iomem *addr)
+ {
+ 	u32 val;
+ 
+-	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+ 	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ 	__io_ar(val);
+-	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -234,11 +242,13 @@ static inline u64 readq(const volatile void __iomem *addr)
+ {
+ 	u64 val;
+ 
+-	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+ 	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ 	__io_ar(val);
+-	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -248,11 +258,13 @@ static inline u64 readq(const volatile void __iomem *addr)
+ #define writeb writeb
+ static inline void writeb(u8 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+ 	__raw_writeb(value, addr);
+ 	__io_aw();
+-	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -260,11 +272,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
+ #define writew writew
+ static inline void writew(u16 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+ 	__raw_writew((u16 __force)cpu_to_le16(value), addr);
+ 	__io_aw();
+-	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -272,11 +286,13 @@ static inline void writew(u16 value, volatile void __iomem *addr)
+ #define writel writel
+ static inline void writel(u32 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+ 	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ 	__io_aw();
+-	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -285,11 +301,13 @@ static inline void writel(u32 value, volatile void __iomem *addr)
+ #define writeq writeq
+ static inline void writeq(u64 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+ 	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ 	__io_aw();
+-	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ #endif /* CONFIG_64BIT */
+@@ -305,9 +323,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
+ {
+ 	u8 val;
+ 
+-	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ 	val = __raw_readb(addr);
+-	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -318,9 +338,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
+ {
+ 	u16 val;
+ 
+-	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ 	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+-	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -331,9 +353,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
+ {
+ 	u32 val;
+ 
+-	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ 	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+-	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -344,9 +368,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
+ {
+ 	u64 val;
+ 
+-	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_read))
++		log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ 	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+-	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++		log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+ #endif
+@@ -355,9 +381,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
+ #define writeb_relaxed writeb_relaxed
+ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ 	__raw_writeb(value, addr);
+-	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -365,9 +393,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+ #define writew_relaxed writew_relaxed
+ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ 	__raw_writew((u16 __force)cpu_to_le16(value), addr);
+-	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -375,9 +405,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ #define writel_relaxed writel_relaxed
+ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ 	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
+-	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
+@@ -385,9 +417,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ #define writeq_relaxed writeq_relaxed
+ static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+ {
+-	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_write))
++		log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ 	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+-	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++		log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ 
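
Every accessor in this header now follows one pattern: an inline tracepoint_enabled() test (a static branch when tracepoints are compiled in, constant false otherwise) guards the out-of-line log_*_mmio() call, so untraced MMIO pays no function-call cost. The shape of that optimisation, modelled in user space with a plain flag (the kernel's static keys are cheaper still, patching the branch at runtime):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool trace_enabled; /* stand-in for a static key/jump label */

    /* Out-of-line, comparatively expensive logging helper. */
    static void log_write(uint64_t val, unsigned int width, const void *addr)
    {
        fprintf(stderr, "rwmmio write: %llu (%u bits) @ %p\n",
                (unsigned long long)val, width, addr);
    }

    static inline void writel_model(uint32_t value, volatile uint32_t *addr)
    {
        if (trace_enabled) /* cheap inline check first */
            log_write(value, 32, (const void *)addr);
        *addr = value; /* the actual store */
    }

    int main(void)
    {
        uint32_t fake_reg;

        writel_model(0xdeadbeef, &fake_reg); /* no log call at all */
        trace_enabled = true;
        writel_model(0xcafef00d, &fake_reg); /* now traced */
        return 0;
    }
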
+diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
+index de45cf2ee1e4f8..ce2086f97e3fcf 100644
+--- a/include/linux/iio/frequency/adf4350.h
++++ b/include/linux/iio/frequency/adf4350.h
+@@ -51,7 +51,7 @@
+ 
+ /* REG3 Bit Definitions */
+ #define ADF4350_REG3_12BIT_CLKDIV(x)		((x) << 3)
+-#define ADF4350_REG3_12BIT_CLKDIV_MODE(x)	((x) << 16)
++#define ADF4350_REG3_12BIT_CLKDIV_MODE(x)	((x) << 15)
+ #define ADF4350_REG3_12BIT_CSR_EN		(1 << 18)
+ #define ADF4351_REG3_CHARGE_CANCELLATION_EN	(1 << 21)
+ #define ADF4351_REG3_ANTI_BACKLASH_3ns_EN	(1 << 22)
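
The ADF4350 fix moves the clock-divider mode field down one bit: the 12-bit CLKDIV value occupies bits 14:3 of REG3, so the 2-bit divider mode starts at bit 15, and the old shift of 16 planted it one position too high. Comparing the two encodings (a quick check, layout per the header above):

    #include <stdio.h>

    #define REG3_12BIT_CLKDIV(x)     ((unsigned int)(x) << 3)
    #define REG3_CLKDIV_MODE_OLD(x)  ((unsigned int)(x) << 16) /* buggy */
    #define REG3_CLKDIV_MODE_NEW(x)  ((unsigned int)(x) << 15) /* fixed */

    int main(void)
    {
        /* Mode 3 (0b11) should occupy bits 16:15 of REG3. */
        printf("old: 0x%08x\n", REG3_CLKDIV_MODE_OLD(3)); /* 0x00030000 */
        printf("new: 0x%08x\n", REG3_CLKDIV_MODE_NEW(3)); /* 0x00018000 */
        return 0;
    }
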
+diff --git a/include/linux/ksm.h b/include/linux/ksm.h
+index b9cdeba03668ae..f74c5222484017 100644
+--- a/include/linux/ksm.h
++++ b/include/linux/ksm.h
+@@ -59,6 +59,12 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+ 	int ret;
+ 
+ 	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
++		long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
++
++		mm->ksm_merging_pages = 0;
++		mm->ksm_rmap_items = 0;
++		atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
++
+ 		ret = __ksm_enter(mm);
+ 		if (ret)
+ 			return ret;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index cb38eee732fd02..9559501236af08 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2343,6 +2343,12 @@ enum rseq_event_mask {
+ 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
+ };
+ 
++#ifdef CONFIG_MEMBARRIER
++# define RSEQ_EVENT_GUARD	irq
++#else
++# define RSEQ_EVENT_GUARD	preempt
++#endif
++
+ static inline void rseq_set_notify_resume(struct task_struct *t)
+ {
+ 	if (t->rseq)
+@@ -2361,9 +2367,8 @@ static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ static inline void rseq_signal_deliver(struct ksignal *ksig,
+ 				       struct pt_regs *regs)
+ {
+-	preempt_disable();
+-	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+-	preempt_enable();
++	scoped_guard(RSEQ_EVENT_GUARD)
++		__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ 	rseq_handle_notify_resume(ksig, regs);
+ }
+ 
+diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
+index 0740dfc6c04881..61a497ec5a2a96 100644
+--- a/include/media/v4l2-subdev.h
++++ b/include/media/v4l2-subdev.h
+@@ -1881,19 +1881,23 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
+  *
+  * Note: only legacy non-MC drivers may need this macro.
+  */
+-#define v4l2_subdev_call_state_try(sd, o, f, args...)                 \
+-	({                                                            \
+-		int __result;                                         \
+-		static struct lock_class_key __key;                   \
+-		const char *name = KBUILD_BASENAME                    \
+-			":" __stringify(__LINE__) ":state->lock";     \
+-		struct v4l2_subdev_state *state =                     \
+-			__v4l2_subdev_state_alloc(sd, name, &__key);  \
+-		v4l2_subdev_lock_state(state);                        \
+-		__result = v4l2_subdev_call(sd, o, f, state, ##args); \
+-		v4l2_subdev_unlock_state(state);                      \
+-		__v4l2_subdev_state_free(state);                      \
+-		__result;                                             \
++#define v4l2_subdev_call_state_try(sd, o, f, args...)                         \
++	({                                                                    \
++		int __result;                                                 \
++		static struct lock_class_key __key;                           \
++		const char *name = KBUILD_BASENAME                            \
++			":" __stringify(__LINE__) ":state->lock";             \
++		struct v4l2_subdev_state *state =                             \
++			__v4l2_subdev_state_alloc(sd, name, &__key);          \
++		if (IS_ERR(state)) {                                          \
++			__result = PTR_ERR(state);                            \
++		} else {                                                      \
++			v4l2_subdev_lock_state(state);                        \
++			__result = v4l2_subdev_call(sd, o, f, state, ##args); \
++			v4l2_subdev_unlock_state(state);                      \
++			__v4l2_subdev_state_free(state);                      \
++		}                                                             \
++		__result;                                                     \
+ 	})
+ 
+ /**
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index c0f4b91e4f5ece..32606d54306057 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -955,8 +955,7 @@ struct nft_expr_ops {
+ 						const struct nft_expr *expr,
+ 						bool reset);
+ 	int				(*validate)(const struct nft_ctx *ctx,
+-						    const struct nft_expr *expr,
+-						    const struct nft_data **data);
++						    const struct nft_expr *expr);
+ 	bool				(*reduce)(struct nft_regs_track *track,
+ 						  const struct nft_expr *expr);
+ 	bool				(*gc)(struct net *net,
+diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
+index 167640b843ef8a..38cae7113de462 100644
+--- a/include/net/netfilter/nft_fib.h
++++ b/include/net/netfilter/nft_fib.h
+@@ -21,9 +21,7 @@ nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+ int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset);
+ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 		 const struct nlattr * const tb[]);
+-int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+-		     const struct nft_data **data);
+-
++int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr);
+ 
+ void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
+ 			const struct nft_pktinfo *pkt);
+diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
+index ba1238f12a487a..d602263590fed5 100644
+--- a/include/net/netfilter/nft_meta.h
++++ b/include/net/netfilter/nft_meta.h
+@@ -41,8 +41,7 @@ void nft_meta_set_destroy(const struct nft_ctx *ctx,
+ 			  const struct nft_expr *expr);
+ 
+ int nft_meta_set_validate(const struct nft_ctx *ctx,
+-			  const struct nft_expr *expr,
+-			  const struct nft_data **data);
++			  const struct nft_expr *expr);
+ 
+ bool nft_meta_get_reduce(struct nft_regs_track *track,
+ 			 const struct nft_expr *expr);
+diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
+index 6d9ba62efd7504..19060212988a1e 100644
+--- a/include/net/netfilter/nft_reject.h
++++ b/include/net/netfilter/nft_reject.h
+@@ -15,8 +15,7 @@ struct nft_reject {
+ extern const struct nla_policy nft_reject_policy[];
+ 
+ int nft_reject_validate(const struct nft_ctx *ctx,
+-			const struct nft_expr *expr,
+-			const struct nft_data **data);
++			const struct nft_expr *expr);
+ 
+ int nft_reject_init(const struct nft_ctx *ctx,
+ 		    const struct nft_expr *expr,
+diff --git a/init/main.c b/init/main.c
+index c787e94cc8982b..20d2dd4d9b40f0 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -530,6 +530,12 @@ static int __init unknown_bootoption(char *param, char *val,
+ 				     const char *unused, void *arg)
+ {
+ 	size_t len = strlen(param);
++	/*
++	 * Well-known bootloader identifiers:
++	 * 1. LILO/Grub pass "BOOT_IMAGE=...";
++	 * 2. kexec/kdump (kexec-tools) pass "kexec".
++	 */
++	const char *bootloader[] = { "BOOT_IMAGE=", "kexec", NULL };
+ 
+ 	/* Handle params aliased to sysctls */
+ 	if (sysctl_is_alias(param))
+@@ -537,6 +543,12 @@ static int __init unknown_bootoption(char *param, char *val,
+ 
+ 	repair_env_string(param, val);
+ 
++	/* Handle bootloader identifier */
++	for (int i = 0; bootloader[i]; i++) {
++		if (strstarts(param, bootloader[i]))
++			return 0;
++	}
++
+ 	/* Handle obsolete-style parameters */
+ 	if (obsolete_checksetup(param))
+ 		return 0;
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index 99d0625b6c828f..9a9630adcba4f6 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -607,7 +607,7 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
+ 	return 0;
+ }
+ 
+-static void bpf_free_inode(struct inode *inode)
++static void bpf_destroy_inode(struct inode *inode)
+ {
+ 	enum bpf_type type;
+ 
+@@ -622,7 +622,7 @@ static const struct super_operations bpf_super_ops = {
+ 	.statfs		= simple_statfs,
+ 	.drop_inode	= generic_delete_inode,
+ 	.show_options	= bpf_show_options,
+-	.free_inode	= bpf_free_inode,
++	.destroy_inode	= bpf_destroy_inode,
+ };
+ 
+ enum {
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 0e20d7e9460848..2141ebb2ef92ad 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1799,7 +1799,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
+ 	return 0;
+ }
+ 
+-static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
++static int copy_sighand(u64 clone_flags, struct task_struct *tsk)
+ {
+ 	struct sighand_struct *sig;
+ 
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 6500ef956f2f88..e57adc00cb779b 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -477,7 +477,7 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+ 	struct upid *upid;
+ 	pid_t nr = 0;
+ 
+-	if (pid && ns->level <= pid->level) {
++	if (pid && ns && ns->level <= pid->level) {
+ 		upid = &pid->numbers[ns->level];
+ 		if (upid->ns == ns)
+ 			nr = upid->nr;
+diff --git a/kernel/rseq.c b/kernel/rseq.c
+index 23894ba8250cf9..810005f927d7cc 100644
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -255,12 +255,12 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
+ 
+ 	/*
+ 	 * Load and clear event mask atomically with respect to
+-	 * scheduler preemption.
++	 * scheduler preemption and membarrier IPIs.
+ 	 */
+-	preempt_disable();
+-	event_mask = t->rseq_event_mask;
+-	t->rseq_event_mask = 0;
+-	preempt_enable();
++	scoped_guard(RSEQ_EVENT_GUARD) {
++		event_mask = t->rseq_event_mask;
++		t->rseq_event_mask = 0;
++	}
+ 
+ 	return !!event_mask;
+ }
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index a15cf7969953a5..5bb9735e19d2f5 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2214,6 +2214,25 @@ static int find_later_rq(struct task_struct *task)
+ 	return -1;
+ }
+ 
++static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
++{
++	struct task_struct *p;
++
++	if (!has_pushable_dl_tasks(rq))
++		return NULL;
++
++	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
++
++	WARN_ON_ONCE(rq->cpu != task_cpu(p));
++	WARN_ON_ONCE(task_current(rq, p));
++	WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
++
++	WARN_ON_ONCE(!task_on_rq_queued(p));
++	WARN_ON_ONCE(!dl_task(p));
++
++	return p;
++}
++
+ /* Locks the rq it finds */
+ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+ {
+@@ -2241,12 +2260,37 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+ 
+ 		/* Retry if something changed. */
+ 		if (double_lock_balance(rq, later_rq)) {
+-			if (unlikely(task_rq(task) != rq ||
++			/*
++			 * double_lock_balance had to release rq->lock; in the
++			 * meantime the task may no longer be fit for migration.
++			 * Check the following to ensure that the task is
++			 * still suitable for migration:
++			 * 1. It is possible the task was scheduled,
++			 *    migrate_disabled was set and then got preempted,
++			 *    so we must check the task migration disable
++			 *    flag.
++			 * 2. The CPU picked is in the task's affinity.
++			 * 3. For throttled task (dl_task_offline_migration),
++			 *    check the following:
++			 *    - the task is not on the rq anymore (it was
++			 *      migrated)
++			 *    - the task is not on CPU anymore
++			 *    - the task is still a dl task
++			 *    - the task is not queued on the rq anymore
++			 * 4. For the non-throttled task (push_dl_task), the
++			 *    check to ensure that this task is still at the
++			 *    head of the pushable tasks list is enough.
++			 */
++			if (unlikely(is_migration_disabled(task) ||
+ 				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
+-				     task_on_cpu(rq, task) ||
+-				     !dl_task(task) ||
+-				     is_migration_disabled(task) ||
+-				     !task_on_rq_queued(task))) {
++				     (task->dl.dl_throttled &&
++				      (task_rq(task) != rq ||
++				       task_on_cpu(rq, task) ||
++				       !dl_task(task) ||
++				       !task_on_rq_queued(task))) ||
++				     (!task->dl.dl_throttled &&
++				      task != pick_next_pushable_dl_task(rq)))) {
++
+ 				double_unlock_balance(rq, later_rq);
+ 				later_rq = NULL;
+ 				break;
+@@ -2269,25 +2313,6 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+ 	return later_rq;
+ }
+ 
+-static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
+-{
+-	struct task_struct *p;
+-
+-	if (!has_pushable_dl_tasks(rq))
+-		return NULL;
+-
+-	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
+-
+-	WARN_ON_ONCE(rq->cpu != task_cpu(p));
+-	WARN_ON_ONCE(task_current(rq, p));
+-	WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
+-
+-	WARN_ON_ONCE(!task_on_rq_queued(p));
+-	WARN_ON_ONCE(!dl_task(p));
+-
+-	return p;
+-}
+-
+ /*
+  * See if the non running -deadline tasks on this rq
+  * can be sent to some other CPU where they can preempt
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 355de0b65c2358..47cb10a16b009d 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1689,6 +1689,7 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
+ 	struct rlimit old, new;
+ 	struct task_struct *tsk;
+ 	unsigned int checkflags = 0;
++	bool need_tasklist;
+ 	int ret;
+ 
+ 	if (old_rlim)
+@@ -1715,8 +1716,25 @@ SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
+ 	get_task_struct(tsk);
+ 	rcu_read_unlock();
+ 
+-	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
+-			old_rlim ? &old : NULL);
++	need_tasklist = !same_thread_group(tsk, current);
++	if (need_tasklist) {
++		/*
++		 * Ensure we can't race with group exit or de_thread(),
++		 * so tsk->group_leader can't be freed or changed until
++		 * read_unlock(tasklist_lock) below.
++		 */
++		read_lock(&tasklist_lock);
++		if (!pid_alive(tsk))
++			ret = -ESRCH;
++	}
++
++	if (!ret) {
++		ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
++				old_rlim ? &old : NULL);
++	}
++
++	if (need_tasklist)
++		read_unlock(&tasklist_lock);
+ 
+ 	if (!ret && old_rlim) {
+ 		rlim_to_rlim64(&old, &old64);
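
For orientation, this syscall is what glibc's prlimit(2) wrapper invokes; the race fixed above only exists when the target task is outside the caller's thread group, which is precisely the case now bracketed by tasklist_lock. A minimal caller (self-targeted, so it needs no privileges; a nonzero pid of another process would exercise the newly locked path):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit old;

        /* pid 0 means the calling process; another process's pid takes
         * the cross-thread-group path guarded by tasklist_lock. */
        if (prlimit(0, RLIMIT_NOFILE, NULL, &old) != 0) {
            perror("prlimit");
            return 1;
        }
        printf("RLIMIT_NOFILE: soft=%llu hard=%llu\n",
               (unsigned long long)old.rlim_cur,
               (unsigned long long)old.rlim_max);
        return 0;
    }
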
+diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
+index 93620a78358769..a233262f858c86 100644
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -342,12 +342,14 @@ static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
+ 			     void *entry_data)
+ {
+ 	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++	unsigned int flags = trace_probe_load_flag(&tf->tp);
+ 	int ret = 0;
+ 
+-	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
++	if (flags & TP_FLAG_TRACE)
+ 		fentry_trace_func(tf, entry_ip, regs);
++
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		ret = fentry_perf_func(tf, entry_ip, regs);
+ #endif
+ 	return ret;
+@@ -359,11 +361,12 @@ static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
+ 			     void *entry_data)
+ {
+ 	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++	unsigned int flags = trace_probe_load_flag(&tf->tp);
+ 
+-	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
++	if (flags & TP_FLAG_TRACE)
+ 		fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data);
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data);
+ #endif
+ }
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 46491f3c1569cd..20dbe40ab15e8e 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1713,14 +1713,15 @@ static int kprobe_register(struct trace_event_call *event,
+ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
+ {
+ 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
++	unsigned int flags = trace_probe_load_flag(&tk->tp);
+ 	int ret = 0;
+ 
+ 	raw_cpu_inc(*tk->nhit);
+ 
+-	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++	if (flags & TP_FLAG_TRACE)
+ 		kprobe_trace_func(tk, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		ret = kprobe_perf_func(tk, regs);
+ #endif
+ 	return ret;
+@@ -1732,6 +1733,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
+ {
+ 	struct kretprobe *rp = get_kretprobe(ri);
+ 	struct trace_kprobe *tk;
++	unsigned int flags;
+ 
+ 	/*
+ 	 * There is a small chance that get_kretprobe(ri) returns NULL when
+@@ -1744,10 +1746,11 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
+ 	tk = container_of(rp, struct trace_kprobe, rp);
+ 	raw_cpu_inc(*tk->nhit);
+ 
+-	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++	flags = trace_probe_load_flag(&tk->tp);
++	if (flags & TP_FLAG_TRACE)
+ 		kretprobe_trace_func(tk, ri, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		kretprobe_perf_func(tk, ri, regs);
+ #endif
+ 	return 0;	/* We don't tweak kernel, so just return 0 */
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 48afed3c3f88ec..c71fa9c2f3815b 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -268,16 +268,21 @@ struct event_file_link {
+ 	struct list_head		list;
+ };
+ 
++static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
++{
++	return smp_load_acquire(&tp->event->flags);
++}
++
+ static inline bool trace_probe_test_flag(struct trace_probe *tp,
+ 					 unsigned int flag)
+ {
+-	return !!(tp->event->flags & flag);
++	return !!(trace_probe_load_flag(tp) & flag);
+ }
+ 
+ static inline void trace_probe_set_flag(struct trace_probe *tp,
+ 					unsigned int flag)
+ {
+-	tp->event->flags |= flag;
++	smp_store_release(&tp->event->flags, tp->event->flags | flag);
+ }
+ 
+ static inline void trace_probe_clear_flag(struct trace_probe *tp,
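
The helpers above are the heart of this group of tracing fixes: dispatchers now read the event flags exactly once with an acquire load and test the snapshot, pairing with the release store in trace_probe_set_flag(), instead of re-reading a word that can change under them. The same acquire/release idiom in portable C11 atomics (a sketch of the idea, not the kernel's smp_* macros):

    #include <stdatomic.h>
    #include <stdio.h>

    #define TP_FLAG_TRACE   (1 << 0)
    #define TP_FLAG_PROFILE (1 << 1)

    static _Atomic unsigned int event_flags;

    /* Writer: publish a flag; release ordering makes everything written
     * before this visible to an acquire reader that sees the flag. */
    static void set_flag(unsigned int flag)
    {
        atomic_fetch_or_explicit(&event_flags, flag, memory_order_release);
    }

    /* Reader: snapshot the flags once with acquire ordering, then test
     * the local copy, as the patched dispatchers do via
     * trace_probe_load_flag(). */
    static void dispatch(void)
    {
        unsigned int flags = atomic_load_explicit(&event_flags,
                                                  memory_order_acquire);

        if (flags & TP_FLAG_TRACE)
            puts("trace path");
        if (flags & TP_FLAG_PROFILE)
            puts("perf path");
    }

    int main(void)
    {
        dispatch();              /* nothing enabled yet */
        set_flag(TP_FLAG_TRACE);
        dispatch();              /* trace path */
        return 0;
    }
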
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index ecf04e81ddf705..03d79a9ebd8c82 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1514,6 +1514,7 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+ 	struct trace_uprobe *tu;
+ 	struct uprobe_dispatch_data udd;
+ 	struct uprobe_cpu_buffer *ucb = NULL;
++	unsigned int flags;
+ 	int ret = 0;
+ 
+ 	tu = container_of(con, struct trace_uprobe, consumer);
+@@ -1527,11 +1528,12 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+ 	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+ 		return 0;
+ 
+-	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++	flags = trace_probe_load_flag(&tu->tp);
++	if (flags & TP_FLAG_TRACE)
+ 		ret |= uprobe_trace_func(tu, regs, &ucb);
+ 
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		ret |= uprobe_perf_func(tu, regs, &ucb);
+ #endif
+ 	uprobe_buffer_put(ucb);
+@@ -1544,6 +1546,7 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
+ 	struct trace_uprobe *tu;
+ 	struct uprobe_dispatch_data udd;
+ 	struct uprobe_cpu_buffer *ucb = NULL;
++	unsigned int flags;
+ 
+ 	tu = container_of(con, struct trace_uprobe, consumer);
+ 
+@@ -1555,11 +1558,12 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
+ 	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+ 		return 0;
+ 
+-	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++	flags = trace_probe_load_flag(&tu->tp);
++	if (flags & TP_FLAG_TRACE)
+ 		uretprobe_trace_func(tu, func, regs, &ucb);
+ 
+ #ifdef CONFIG_PERF_EVENTS
+-	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++	if (flags & TP_FLAG_PROFILE)
+ 		uretprobe_perf_func(tu, func, regs, &ucb);
+ #endif
+ 	uprobe_buffer_put(ucb);
+diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
+index 8d1446c2be7193..88246000c9d8eb 100644
+--- a/lib/crypto/Makefile
++++ b/lib/crypto/Makefile
+@@ -30,6 +30,10 @@ obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC)	+= libcurve25519-generic.o
+ libcurve25519-generic-y				:= curve25519-fiat32.o
+ libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128)	:= curve25519-hacl64.o
+ libcurve25519-generic-y				+= curve25519-generic.o
++# clang versions prior to 18 may blow out the stack with KASAN
++ifeq ($(call clang-min-version, 180000),)
++KASAN_SANITIZE_curve25519-hacl64.o := n
++endif
+ 
+ obj-$(CONFIG_CRYPTO_LIB_CURVE25519)		+= libcurve25519.o
+ libcurve25519-y					+= curve25519.o
+diff --git a/lib/genalloc.c b/lib/genalloc.c
+index 4fa5635bf81bd6..841f2978383334 100644
+--- a/lib/genalloc.c
++++ b/lib/genalloc.c
+@@ -899,8 +899,11 @@ struct gen_pool *of_gen_pool_get(struct device_node *np,
+ 		if (!name)
+ 			name = of_node_full_name(np_pool);
+ 	}
+-	if (pdev)
++	if (pdev) {
+ 		pool = gen_pool_get(&pdev->dev, name);
++		put_device(&pdev->dev);
++	}
++
+ 	of_node_put(np_pool);
+ 
+ 	return pool;
+diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
+index 5764b9885e7d21..4d7dc9f65f682e 100644
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -324,10 +324,8 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
+ 	}
+ 
+ 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+-	if (!pte) {
+-		walk->action = ACTION_AGAIN;
++	if (!pte)
+ 		return 0;
+-	}
+ 	if (!pte_present(ptep_get(pte)))
+ 		goto out;
+ 	damon_ptep_mkold(pte, walk->vma, addr);
+@@ -479,10 +477,8 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
+ #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
+ 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+-	if (!pte) {
+-		walk->action = ACTION_AGAIN;
++	if (!pte)
+ 		return 0;
+-	}
+ 	ptent = ptep_get(pte);
+ 	if (!pte_present(ptent))
+ 		goto out;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0c6745a6a13a59..532a840a426646 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3298,6 +3298,9 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
+ 		return;
+ 	}
+ 
++	if (!h->max_huge_pages)
++		return;
++
+ 	/* do node specific alloc */
+ 	for_each_online_node(i) {
+ 		if (h->max_huge_pages_node[i] > 0) {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 44011ebecddf01..93cb65c6553ec3 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3738,7 +3738,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
+ 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
+ 			alloc_flags |= ALLOC_NON_BLOCK;
+ 
+-			if (order > 0)
++			if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
+ 				alloc_flags |= ALLOC_HIGHATOMIC;
+ 		}
+ 
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index a1c22eab71ffec..cc54b8267bcc7b 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -1455,7 +1455,7 @@ void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
+ 	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
+ 		return;
+ 
+-	vg = br_vlan_group(br);
++	vg = br_vlan_group_rcu(br);
+ 
+ 	if (idx >= 0 &&
+ 	    ctx->vlan[idx].proto == br->vlan_proto) {
+diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
+index bd4d1b4d745f64..affb740c8685eb 100644
+--- a/net/bridge/netfilter/nft_meta_bridge.c
++++ b/net/bridge/netfilter/nft_meta_bridge.c
+@@ -168,8 +168,7 @@ static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track,
+ }
+ 
+ static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx,
+-					const struct nft_expr *expr,
+-					const struct nft_data **data)
++					const struct nft_expr *expr)
+ {
+ 	struct nft_meta *priv = nft_expr_priv(expr);
+ 	unsigned int hooks;
+@@ -179,7 +178,7 @@ static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx,
+ 		hooks = 1 << NF_BR_PRE_ROUTING;
+ 		break;
+ 	default:
+-		return nft_meta_set_validate(ctx, expr, data);
++		return nft_meta_set_validate(ctx, expr);
+ 	}
+ 
+ 	return nft_chain_validate_hooks(ctx->chain, hooks);
+diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
+index 71b54fed7263df..1cb5c16e97b7fa 100644
+--- a/net/bridge/netfilter/nft_reject_bridge.c
++++ b/net/bridge/netfilter/nft_reject_bridge.c
+@@ -170,8 +170,7 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
+ }
+ 
+ static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+-				      const struct nft_expr *expr,
+-				      const struct nft_data **data)
++				      const struct nft_expr *expr)
+ {
+ 	return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
+ 						    (1 << NF_BR_LOCAL_IN));
+diff --git a/net/core/filter.c b/net/core/filter.c
+index b6dbcef649654c..c2e888ea54abbf 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2273,6 +2273,7 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
+ 		if (IS_ERR(dst))
+ 			goto out_drop;
+ 
++		skb_dst_drop(skb);
+ 		skb_dst_set(skb, dst);
+ 	} else if (nh->nh_family != AF_INET6) {
+ 		goto out_drop;
+@@ -2382,6 +2383,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
+ 			goto out_drop;
+ 		}
+ 
++		skb_dst_drop(skb);
+ 		skb_dst_set(skb, &rt->dst);
+ 	}
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 7d824578f217a8..5dde0aed314405 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1724,6 +1724,7 @@ EXPORT_SYMBOL(tcp_peek_len);
+ /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
+ int tcp_set_rcvlowat(struct sock *sk, int val)
+ {
++	struct tcp_sock *tp = tcp_sk(sk);
+ 	int space, cap;
+ 
+ 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
+@@ -1742,7 +1743,9 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
+ 	space = tcp_space_from_win(sk, val);
+ 	if (space > sk->sk_rcvbuf) {
+ 		WRITE_ONCE(sk->sk_rcvbuf, space);
+-		WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
++
++		if (tp->window_clamp && tp->window_clamp < val)
++			WRITE_ONCE(tp->window_clamp, val);
+ 	}
+ 	return 0;
+ }
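
tcp_set_rcvlowat() is the TCP backend for SO_RCVLOWAT; the fix stops it from overwriting a window clamp with a smaller value, only ever raising an existing clamp to cover the requested low-water mark. Setting the option from user space is straightforward (sketch):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int lowat = 64 * 1024; /* don't wake readers for less than 64 KiB */

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
                                 &lowat, sizeof(lowat)) != 0) {
            perror("SO_RCVLOWAT");
            return 1;
        }
        printf("rcvlowat set; the kernel may grow sk_rcvbuf to match\n");
        close(fd);
        return 0;
    }
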
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index c6d00817ad3fd9..8834cd41b38408 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -7200,7 +7200,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ 				    &foc, TCP_SYNACK_FASTOPEN, skb);
+ 		/* Add the child socket directly into the accept queue */
+ 		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+-			reqsk_fastopen_remove(fastopen_sk, req, false);
+ 			bh_unlock_sock(fastopen_sk);
+ 			sock_put(fastopen_sk);
+ 			goto drop_and_free;
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 29c167e5fc0255..ab7bdb6531816d 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -227,9 +227,12 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
+ 		} else {
+ 			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
+ 		}
+-	/* id0 should not have a different address */
++	/* - id0 should not have a different address
++	 * - special case for C-flag: linked to fill_local_addresses_vec()
++	 */
+ 	} else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
+-		   (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
++		   (addr->id > 0 && !READ_ONCE(pm->accept_addr) &&
++		    !mptcp_pm_add_addr_c_flag_case(msk))) {
+ 		mptcp_pm_announce_addr(msk, addr, true);
+ 		mptcp_pm_add_addr_send_ack(msk);
+ 	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index de24989b05a600..9d2c38421f7a2b 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -675,10 +675,12 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 	struct mptcp_addr_info mpc_addr;
+ 	struct pm_nl_pernet *pernet;
+ 	unsigned int subflows_max;
++	bool c_flag_case;
+ 	int i = 0;
+ 
+ 	pernet = pm_nl_get_pernet_from_msk(msk);
+ 	subflows_max = mptcp_pm_get_subflows_max(msk);
++	c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk);
+ 
+ 	mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+ 
+@@ -691,11 +693,26 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 			continue;
+ 
+ 		if (msk->pm.subflows < subflows_max) {
++			bool is_id0;
++
+ 			msk->pm.subflows++;
+ 			addrs[i] = entry->addr;
+ 
++			is_id0 = mptcp_addresses_equal(&entry->addr,
++						       &mpc_addr,
++						       entry->addr.port);
++
++			if (c_flag_case &&
++			    (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) {
++				__clear_bit(addrs[i].id,
++					    msk->pm.id_avail_bitmap);
++
++				if (!is_id0)
++					msk->pm.local_addr_used++;
++			}
++
+ 			/* Special case for ID0: set the correct ID */
+-			if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port))
++			if (is_id0)
+ 				addrs[i].id = 0;
+ 
+ 			i++;
+@@ -703,6 +720,39 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
+ 	}
+ 	rcu_read_unlock();
+ 
++	/* Special case: the peer sets the C flag. Accept one ADD_ADDR even
++	 * when the default limits (which accept no ADD_ADDR) are in use, and
++	 * use subflow endpoints
++	 */
++	if (!i && c_flag_case) {
++		unsigned int local_addr_max = mptcp_pm_get_local_addr_max(msk);
++
++		while (msk->pm.local_addr_used < local_addr_max &&
++		       msk->pm.subflows < subflows_max) {
++			struct mptcp_pm_addr_entry local;
++
++			if (!select_local_address(pernet, msk, &local))
++				break;
++
++			__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
++
++			if (!mptcp_pm_addr_families_match(sk, &local.addr,
++							  remote))
++				continue;
++
++			if (mptcp_addresses_equal(&local.addr, &mpc_addr,
++						  local.addr.port))
++				continue;
++
++			addrs[i] = local.addr;
++
++			msk->pm.local_addr_used++;
++			msk->pm.subflows++;
++			i++;
++		}
++
++		return i;
++	}
++
+ 	/* If the array is empty, fill in the single
+ 	 * 'IPADDRANY' local address
+ 	 */
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index c5f41cdb36c4b1..1f213706dfaa52 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -1080,6 +1080,14 @@ static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
+ 	spin_unlock_bh(&msk->pm.lock);
+ }
+ 
++static inline bool mptcp_pm_add_addr_c_flag_case(struct mptcp_sock *msk)
++{
++	return READ_ONCE(msk->pm.remote_deny_join_id0) &&
++	       msk->pm.local_addr_used == 0 &&
++	       mptcp_pm_get_add_addr_accept_max(msk) == 0 &&
++	       msk->pm.subflows < mptcp_pm_get_subflows_max(msk);
++}
++
+ void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
+ void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4ffb5ef79ca13f..5ca1d775e976d4 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3793,7 +3793,6 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
+ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ {
+ 	struct nft_expr *expr, *last;
+-	const struct nft_data *data;
+ 	struct nft_rule *rule;
+ 	int err;
+ 
+@@ -3814,7 +3813,7 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ 			/* This may call nft_chain_validate() recursively,
+ 			 * callers that do so must increment ctx->level.
+ 			 */
+-			err = expr->ops->validate(ctx, expr, &data);
++			err = expr->ops->validate(ctx, expr);
+ 			if (err < 0)
+ 				return err;
+ 		}
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index d3d11dede54507..52cdfee17f73f1 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -350,8 +350,7 @@ static int nft_target_dump(struct sk_buff *skb,
+ }
+ 
+ static int nft_target_validate(const struct nft_ctx *ctx,
+-			       const struct nft_expr *expr,
+-			       const struct nft_data **data)
++			       const struct nft_expr *expr)
+ {
+ 	struct xt_target *target = expr->ops->data;
+ 	unsigned int hook_mask = 0;
+@@ -611,8 +610,7 @@ static int nft_match_large_dump(struct sk_buff *skb,
+ }
+ 
+ static int nft_match_validate(const struct nft_ctx *ctx,
+-			      const struct nft_expr *expr,
+-			      const struct nft_data **data)
++			      const struct nft_expr *expr)
+ {
+ 	struct xt_match *match = expr->ops->data;
+ 	unsigned int hook_mask = 0;
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index bf825f6cb974ea..507b7eba9bcdcb 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -26,8 +26,7 @@ const struct nla_policy nft_fib_policy[NFTA_FIB_MAX + 1] = {
+ };
+ EXPORT_SYMBOL(nft_fib_policy);
+ 
+-int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+-		     const struct nft_data **data)
++int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ {
+ 	const struct nft_fib *priv = nft_expr_priv(expr);
+ 	unsigned int hooks;
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 5a3d6854204202..1894032a7971c6 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -385,8 +385,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ }
+ 
+ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
+-				     const struct nft_expr *expr,
+-				     const struct nft_data **data)
++				     const struct nft_expr *expr)
+ {
+ 	unsigned int hook_mask = (1 << NF_INET_FORWARD);
+ 
+diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
+index a5268e6dd32f1b..fa9e4ae00b16a9 100644
+--- a/net/netfilter/nft_fwd_netdev.c
++++ b/net/netfilter/nft_fwd_netdev.c
+@@ -204,8 +204,7 @@ static int nft_fwd_neigh_dump(struct sk_buff *skb,
+ }
+ 
+ static int nft_fwd_validate(const struct nft_ctx *ctx,
+-			    const struct nft_expr *expr,
+-			    const struct nft_data **data)
++			    const struct nft_expr *expr)
+ {
+ 	return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS) |
+ 						    (1 << NF_NETDEV_EGRESS));
+diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
+index ac2422c215e545..02ee5fb69871f8 100644
+--- a/net/netfilter/nft_immediate.c
++++ b/net/netfilter/nft_immediate.c
+@@ -244,8 +244,7 @@ static int nft_immediate_dump(struct sk_buff *skb,
+ }
+ 
+ static int nft_immediate_validate(const struct nft_ctx *ctx,
+-				  const struct nft_expr *expr,
+-				  const struct nft_data **d)
++				  const struct nft_expr *expr)
+ {
+ 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+ 	struct nft_ctx *pctx = (struct nft_ctx *)ctx;
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index 1b9edf2b339373..dd5441f92fdb06 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -206,8 +206,7 @@ static int nft_lookup_dump(struct sk_buff *skb,
+ }
+ 
+ static int nft_lookup_validate(const struct nft_ctx *ctx,
+-			       const struct nft_expr *expr,
+-			       const struct nft_data **d)
++			       const struct nft_expr *expr)
+ {
+ 	const struct nft_lookup *priv = nft_expr_priv(expr);
+ 	struct nft_set_iter iter;
+diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
+index 8a14aaca93bbdf..eee05394c53390 100644
+--- a/net/netfilter/nft_masq.c
++++ b/net/netfilter/nft_masq.c
+@@ -27,8 +27,7 @@ static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
+ };
+ 
+ static int nft_masq_validate(const struct nft_ctx *ctx,
+-			     const struct nft_expr *expr,
+-			     const struct nft_data **data)
++			     const struct nft_expr *expr)
+ {
+ 	int err;
+ 
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index 9139ce38ea7b9a..dec76d28a0ac64 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -581,8 +581,7 @@ static int nft_meta_get_validate_xfrm(const struct nft_ctx *ctx)
+ }
+ 
+ static int nft_meta_get_validate(const struct nft_ctx *ctx,
+-				 const struct nft_expr *expr,
+-				 const struct nft_data **data)
++				 const struct nft_expr *expr)
+ {
+ 	const struct nft_meta *priv = nft_expr_priv(expr);
+ 
+@@ -600,8 +599,7 @@ static int nft_meta_get_validate(const struct nft_ctx *ctx,
+ }
+ 
+ int nft_meta_set_validate(const struct nft_ctx *ctx,
+-			  const struct nft_expr *expr,
+-			  const struct nft_data **data)
++			  const struct nft_expr *expr)
+ {
+ 	struct nft_meta *priv = nft_expr_priv(expr);
+ 	unsigned int hooks;
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index 808f5802c2704a..3d3e639a7a8370 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -137,8 +137,7 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
+ };
+ 
+ static int nft_nat_validate(const struct nft_ctx *ctx,
+-			    const struct nft_expr *expr,
+-			    const struct nft_data **data)
++			    const struct nft_expr *expr)
+ {
+ 	struct nft_nat *priv = nft_expr_priv(expr);
+ 	int err;
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 509011b1ef597c..08a27433e2f5fc 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -22,6 +22,35 @@ void nft_objref_eval(const struct nft_expr *expr,
+ 	obj->ops->eval(obj, regs, pkt);
+ }
+ 
++static int nft_objref_validate_obj_type(const struct nft_ctx *ctx, u32 type)
++{
++	unsigned int hooks;
++
++	switch (type) {
++	case NFT_OBJECT_SYNPROXY:
++		if (ctx->family != NFPROTO_IPV4 &&
++		    ctx->family != NFPROTO_IPV6 &&
++		    ctx->family != NFPROTO_INET)
++			return -EOPNOTSUPP;
++
++		hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD);
++
++		return nft_chain_validate_hooks(ctx->chain, hooks);
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++static int nft_objref_validate(const struct nft_ctx *ctx,
++			       const struct nft_expr *expr)
++{
++	struct nft_object *obj = nft_objref_priv(expr);
++
++	return nft_objref_validate_obj_type(ctx, obj->ops->type->type);
++}
++
+ static int nft_objref_init(const struct nft_ctx *ctx,
+ 			   const struct nft_expr *expr,
+ 			   const struct nlattr * const tb[])
+@@ -93,6 +122,7 @@ static const struct nft_expr_ops nft_objref_ops = {
+ 	.activate	= nft_objref_activate,
+ 	.deactivate	= nft_objref_deactivate,
+ 	.dump		= nft_objref_dump,
++	.validate	= nft_objref_validate,
+ 	.reduce		= NFT_REDUCE_READONLY,
+ };
+ 
+@@ -198,6 +228,14 @@ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
+ 	nf_tables_destroy_set(ctx, priv->set);
+ }
+ 
++static int nft_objref_map_validate(const struct nft_ctx *ctx,
++				   const struct nft_expr *expr)
++{
++	const struct nft_objref_map *priv = nft_expr_priv(expr);
++
++	return nft_objref_validate_obj_type(ctx, priv->set->objtype);
++}
++
+ static const struct nft_expr_ops nft_objref_map_ops = {
+ 	.type		= &nft_objref_type,
+ 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
+@@ -207,6 +245,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
+ 	.deactivate	= nft_objref_map_deactivate,
+ 	.destroy	= nft_objref_map_destroy,
+ 	.dump		= nft_objref_map_dump,
++	.validate	= nft_objref_map_validate,
+ 	.reduce		= NFT_REDUCE_READONLY,
+ };
+ 
+diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
+index 7f61506e5b44bd..123b4f47ccef4c 100644
+--- a/net/netfilter/nft_osf.c
++++ b/net/netfilter/nft_osf.c
+@@ -113,8 +113,7 @@ static int nft_osf_dump(struct sk_buff *skb,
+ }
+ 
+ static int nft_osf_validate(const struct nft_ctx *ctx,
+-			    const struct nft_expr *expr,
+-			    const struct nft_data **data)
++			    const struct nft_expr *expr)
+ {
+ 	unsigned int hooks;
+ 
+diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
+index b2b8127c8d4381..b8ebb187814f29 100644
+--- a/net/netfilter/nft_queue.c
++++ b/net/netfilter/nft_queue.c
+@@ -69,8 +69,7 @@ static void nft_queue_sreg_eval(const struct nft_expr *expr,
+ }
+ 
+ static int nft_queue_validate(const struct nft_ctx *ctx,
+-			      const struct nft_expr *expr,
+-			      const struct nft_data **data)
++			      const struct nft_expr *expr)
+ {
+ 	static const unsigned int supported_hooks = ((1 << NF_INET_PRE_ROUTING) |
+ 						     (1 << NF_INET_LOCAL_IN) |
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index a58bd8d291ff2d..9051863509f319 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -27,8 +27,7 @@ static const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
+ };
+ 
+ static int nft_redir_validate(const struct nft_ctx *ctx,
+-			      const struct nft_expr *expr,
+-			      const struct nft_data **data)
++			      const struct nft_expr *expr)
+ {
+ 	int err;
+ 
+diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
+index ed2e668474d677..196a92c7ea09b7 100644
+--- a/net/netfilter/nft_reject.c
++++ b/net/netfilter/nft_reject.c
+@@ -24,8 +24,7 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
+ EXPORT_SYMBOL_GPL(nft_reject_policy);
+ 
+ int nft_reject_validate(const struct nft_ctx *ctx,
+-			const struct nft_expr *expr,
+-			const struct nft_data **data)
++			const struct nft_expr *expr)
+ {
+ 	return nft_chain_validate_hooks(ctx->chain,
+ 					(1 << NF_INET_LOCAL_IN) |
+diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
+index 973fa31a9dd6cd..49020e67304ad7 100644
+--- a/net/netfilter/nft_reject_inet.c
++++ b/net/netfilter/nft_reject_inet.c
+@@ -61,8 +61,7 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
+ }
+ 
+ static int nft_reject_inet_validate(const struct nft_ctx *ctx,
+-				    const struct nft_expr *expr,
+-				    const struct nft_data **data)
++				    const struct nft_expr *expr)
+ {
+ 	return nft_chain_validate_hooks(ctx->chain,
+ 					(1 << NF_INET_LOCAL_IN) |
+diff --git a/net/netfilter/nft_reject_netdev.c b/net/netfilter/nft_reject_netdev.c
+index 7865cd8b11bb6a..2558ce1505d989 100644
+--- a/net/netfilter/nft_reject_netdev.c
++++ b/net/netfilter/nft_reject_netdev.c
+@@ -145,8 +145,7 @@ static void nft_reject_netdev_eval(const struct nft_expr *expr,
+ }
+ 
+ static int nft_reject_netdev_validate(const struct nft_ctx *ctx,
+-				      const struct nft_expr *expr,
+-				      const struct nft_data **data)
++				      const struct nft_expr *expr)
+ {
+ 	return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
+ }
+diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
+index 2434c624aafde1..9525f1e02a7d83 100644
+--- a/net/netfilter/nft_rt.c
++++ b/net/netfilter/nft_rt.c
+@@ -160,8 +160,7 @@ static int nft_rt_get_dump(struct sk_buff *skb,
+ 	return -1;
+ }
+ 
+-static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+-			   const struct nft_data **data)
++static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ {
+ 	const struct nft_rt *priv = nft_expr_priv(expr);
+ 	unsigned int hooks;
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index 187b667bad6c34..35d0409b009501 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -275,8 +275,7 @@ static bool nft_socket_reduce(struct nft_regs_track *track,
+ }
+ 
+ static int nft_socket_validate(const struct nft_ctx *ctx,
+-			       const struct nft_expr *expr,
+-			       const struct nft_data **data)
++			       const struct nft_expr *expr)
+ {
+ 	if (ctx->family != NFPROTO_IPV4 &&
+ 	    ctx->family != NFPROTO_IPV6 &&
+diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
+index 1d737f89dfc18c..5d3e5182598596 100644
+--- a/net/netfilter/nft_synproxy.c
++++ b/net/netfilter/nft_synproxy.c
+@@ -248,8 +248,7 @@ static void nft_synproxy_eval(const struct nft_expr *expr,
+ }
+ 
+ static int nft_synproxy_validate(const struct nft_ctx *ctx,
+-				 const struct nft_expr *expr,
+-				 const struct nft_data **data)
++				 const struct nft_expr *expr)
+ {
+ 	if (ctx->family != NFPROTO_IPV4 &&
+ 	    ctx->family != NFPROTO_IPV6 &&
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index 71412adb73d414..ed344af2a439be 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -313,8 +313,7 @@ static int nft_tproxy_dump(struct sk_buff *skb,
+ }
+ 
+ static int nft_tproxy_validate(const struct nft_ctx *ctx,
+-			       const struct nft_expr *expr,
+-			       const struct nft_data **data)
++			       const struct nft_expr *expr)
+ {
+ 	if (ctx->family != NFPROTO_IPV4 &&
+ 	    ctx->family != NFPROTO_IPV6 &&
+diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
+index 1c866757db5524..8a07b46cc8fb73 100644
+--- a/net/netfilter/nft_xfrm.c
++++ b/net/netfilter/nft_xfrm.c
+@@ -229,8 +229,7 @@ static int nft_xfrm_get_dump(struct sk_buff *skb,
+ 	return 0;
+ }
+ 
+-static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+-			     const struct nft_data **data)
++static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ {
+ 	const struct nft_xfrm *priv = nft_expr_priv(expr);
+ 	unsigned int hooks;
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 08527d882e56ef..eb2ed7db3fe16b 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -31,6 +31,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <crypto/hash.h>
++#include <crypto/utils.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/ip.h>
+@@ -1796,7 +1797,7 @@ struct sctp_association *sctp_unpack_cookie(
+ 		}
+ 	}
+ 
+-	if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
++	if (crypto_memneq(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
+ 		*error = -SCTP_IERROR_BAD_SIG;
+ 		goto fail;
+ 	}
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 808863e047e0c0..cd18b22b2bbaee 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -30,6 +30,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <crypto/utils.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/ip.h>
+@@ -884,7 +885,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
+ 	return SCTP_DISPOSITION_CONSUME;
+ 
+ nomem_authev:
+-	sctp_ulpevent_free(ai_ev);
++	if (ai_ev)
++		sctp_ulpevent_free(ai_ev);
+ nomem_aiev:
+ 	sctp_ulpevent_free(ev);
+ nomem_ev:
+@@ -4415,7 +4417,7 @@ static enum sctp_ierror sctp_sf_authenticate(
+ 				 sh_key, GFP_ATOMIC);
+ 
+ 	/* Discard the packet if the digests do not match */
+-	if (memcmp(save_digest, digest, sig_len)) {
++	if (crypto_memneq(save_digest, digest, sig_len)) {
+ 		kfree(save_digest);
+ 		return SCTP_IERROR_BAD_SIG;
+ 	}
+diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
+index aa108bea6739b3..ddcbfaede6849a 100644
+--- a/security/keys/trusted-keys/trusted_tpm1.c
++++ b/security/keys/trusted-keys/trusted_tpm1.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <crypto/hash_info.h>
++#include <crypto/utils.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/parser.h>
+@@ -241,7 +242,7 @@ int TSS_checkhmac1(unsigned char *buffer,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
++	if (crypto_memneq(testhmac, authdata, SHA1_DIGEST_SIZE))
+ 		ret = -EINVAL;
+ out:
+ 	kfree_sensitive(sdesc);
+@@ -334,7 +335,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
+ 			  TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
++	if (crypto_memneq(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -343,7 +344,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
+ 			  TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
++	if (crypto_memneq(testhmac2, authdata2, SHA1_DIGEST_SIZE))
+ 		ret = -EINVAL;
+ out:
+ 	kfree_sensitive(sdesc);
+diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
+index 0fb759c6eeaf94..07bd84204baa7f 100644
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -59,8 +59,8 @@
+ 
+ #define SOF_IPC4_INVALID_NODE_ID	0xffffffff
+ 
+-/* FW requires minimum 2ms DMA buffer size */
+-#define SOF_IPC4_MIN_DMA_BUFFER_SIZE	2
++/* FW requires minimum 4ms DMA buffer size */
++#define SOF_IPC4_MIN_DMA_BUFFER_SIZE	4
+ 
+ /*
+  * The base of multi-gateways. Multi-gateways addressing starts from
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index 4f869fff28fe43..17e03236e3461c 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -306,10 +306,10 @@ $(OUTPUT)test-libbabeltrace.bin:
+ 	$(BUILD) # -lbabeltrace provided by $(FEATURE_CHECK_LDFLAGS-libbabeltrace)
+ 
+ $(OUTPUT)test-compile-32.bin:
+-	$(CC) -m32 -o $@ test-compile.c
++	$(CC) -m32 -Wall -Werror -o $@ test-compile.c
+ 
+ $(OUTPUT)test-compile-x32.bin:
+-	$(CC) -mx32 -o $@ test-compile.c
++	$(CC) -mx32 -Wall -Werror -o $@ test-compile.c
+ 
+ $(OUTPUT)test-zlib.bin:
+ 	$(BUILD) -lz
+diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
+index ae64090184d328..b50960cce23f24 100644
+--- a/tools/lib/perf/include/perf/event.h
++++ b/tools/lib/perf/include/perf/event.h
+@@ -285,6 +285,7 @@ struct perf_record_header_event_type {
+ struct perf_record_header_tracing_data {
+ 	struct perf_event_header header;
+ 	__u32			 size;
++	__u32			 pad;
+ };
+ 
+ #define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15)
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 9692ebdd7f11e9..1512fedd90cf9a 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -638,8 +638,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
+ 	 * (behavior changed with commit b0a873e).
+ 	 */
+ 	if (errno == EINVAL || errno == ENOSYS ||
+-	    errno == ENOENT || errno == EOPNOTSUPP ||
+-	    errno == ENXIO) {
++	    errno == ENOENT || errno == ENXIO) {
+ 		if (verbose > 0)
+ 			ui__warning("%s event is not supported by the kernel.\n",
+ 				    evsel__name(counter));
+@@ -657,7 +656,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
+ 		if (verbose > 0)
+ 			ui__warning("%s\n", msg);
+ 		return COUNTER_RETRY;
+-	} else if (target__has_per_thread(&target) &&
++	} else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
+ 		   evsel_list->core.threads &&
+ 		   evsel_list->core.threads->err_thread != -1) {
+ 		/*
+@@ -678,6 +677,19 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
+ 		return COUNTER_SKIP;
+ 	}
+ 
++	if (errno == EOPNOTSUPP) {
++		if (verbose > 0) {
++			ui__warning("%s event is not supported by the kernel.\n",
++				    evsel__name(counter));
++		}
++		counter->supported = false;
++		counter->errored = true;
++
++		if ((evsel__leader(counter) != counter) ||
++		    !(counter->core.leader->nr_members > 1))
++			return COUNTER_SKIP;
++	}
++
+ 	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
+ 	ui__error("%s\n", msg);
+ 
+diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
+index 1c4feec1adff11..6e7f053006b4ff 100644
+--- a/tools/perf/tests/perf-record.c
++++ b/tools/perf/tests/perf-record.c
+@@ -115,6 +115,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
+ 	if (err < 0) {
+ 		pr_debug("sched__get_first_possible_cpu: %s\n",
+ 			 str_error_r(errno, sbuf, sizeof(sbuf)));
++		evlist__cancel_workload(evlist);
+ 		goto out_delete_evlist;
+ 	}
+ 
+@@ -126,6 +127,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
+ 	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
+ 		pr_debug("sched_setaffinity: %s\n",
+ 			 str_error_r(errno, sbuf, sizeof(sbuf)));
++		evlist__cancel_workload(evlist);
+ 		goto out_delete_evlist;
+ 	}
+ 
+@@ -137,6 +139,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
+ 	if (err < 0) {
+ 		pr_debug("perf_evlist__open: %s\n",
+ 			 str_error_r(errno, sbuf, sizeof(sbuf)));
++		evlist__cancel_workload(evlist);
+ 		goto out_delete_evlist;
+ 	}
+ 
+@@ -149,6 +152,7 @@ static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest
+ 	if (err < 0) {
+ 		pr_debug("evlist__mmap: %s\n",
+ 			 str_error_r(errno, sbuf, sizeof(sbuf)));
++		evlist__cancel_workload(evlist);
+ 		goto out_delete_evlist;
+ 	}
+ 
+diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
+index 3f1e67795490a0..62f13dfeae8e4d 100755
+--- a/tools/perf/tests/shell/stat.sh
++++ b/tools/perf/tests/shell/stat.sh
+@@ -146,6 +146,34 @@ test_cputype() {
+   echo "cputype test [Success]"
+ }
+ 
++test_hybrid() {
++  # Test that the default stat command on hybrid devices opens one cycles
++  # event for each CPU type.
++  echo "hybrid test"
++
++  # Count the number of core PMUs, assume minimum of 1
++  pmus=$(ls /sys/bus/event_source/devices/*/cpus 2>/dev/null | wc -l)
++  if [ "$pmus" -lt 1 ]
++  then
++    pmus=1
++  fi
++
++  # Run default Perf stat
++  cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*|  cycles[:uH]*  " -c)
++
++  # The expectation is that default output will have a cycles events on each
++  # hybrid PMU. In situations with no cycles PMU events, like virtualized, this
++  # can fall back to task-clock and so the end count may be 0. Fail if neither
++  # condition holds.
++  if [ "$pmus" -ne "$cycles_events" ] && [ "0" -ne "$cycles_events" ]
++  then
++    echo "hybrid test [Found $pmus PMUs but $cycles_events cycles events. Failed]"
++    err=1
++    return
++  fi
++  echo "hybrid test [Success]"
++}
++
+ test_default_stat
+ test_stat_record_report
+ test_stat_record_script
+@@ -153,4 +181,5 @@ test_stat_repeat_weak_groups
+ test_topdown_groups
+ test_topdown_weak_groups
+ test_cputype
++test_hybrid
+ exit $err
+diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+index 1443c28545a946..358c611eeddbb3 100644
+--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
++++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+@@ -56,15 +56,15 @@ enum arm_spe_op_type {
+ 	ARM_SPE_OP_BR_INDIRECT	= 1 << 17,
+ };
+ 
+-enum arm_spe_neoverse_data_source {
+-	ARM_SPE_NV_L1D		 = 0x0,
+-	ARM_SPE_NV_L2		 = 0x8,
+-	ARM_SPE_NV_PEER_CORE	 = 0x9,
+-	ARM_SPE_NV_LOCAL_CLUSTER = 0xa,
+-	ARM_SPE_NV_SYS_CACHE	 = 0xb,
+-	ARM_SPE_NV_PEER_CLUSTER	 = 0xc,
+-	ARM_SPE_NV_REMOTE	 = 0xd,
+-	ARM_SPE_NV_DRAM		 = 0xe,
++enum arm_spe_common_data_source {
++	ARM_SPE_COMMON_DS_L1D		= 0x0,
++	ARM_SPE_COMMON_DS_L2		= 0x8,
++	ARM_SPE_COMMON_DS_PEER_CORE	= 0x9,
++	ARM_SPE_COMMON_DS_LOCAL_CLUSTER = 0xa,
++	ARM_SPE_COMMON_DS_SYS_CACHE	= 0xb,
++	ARM_SPE_COMMON_DS_PEER_CLUSTER	= 0xc,
++	ARM_SPE_COMMON_DS_REMOTE	= 0xd,
++	ARM_SPE_COMMON_DS_DRAM		= 0xe,
+ };
+ 
+ struct arm_spe_record {
+diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
+index 9848310cee5f35..0302e6407b5a20 100644
+--- a/tools/perf/util/arm-spe.c
++++ b/tools/perf/util/arm-spe.c
+@@ -411,15 +411,15 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
+ 	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
+ }
+ 
+-static const struct midr_range neoverse_spe[] = {
++static const struct midr_range common_ds_encoding_cpus[] = {
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ 	{},
+ };
+ 
+-static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
+-						union perf_mem_data_src *data_src)
++static void arm_spe__synth_data_source_common(const struct arm_spe_record *record,
++					      union perf_mem_data_src *data_src)
+ {
+ 	/*
+ 	 * Even though four levels of cache hierarchy are possible, no known
+@@ -441,17 +441,17 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec
+ 	}
+ 
+ 	switch (record->source) {
+-	case ARM_SPE_NV_L1D:
++	case ARM_SPE_COMMON_DS_L1D:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
+ 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ 		break;
+-	case ARM_SPE_NV_L2:
++	case ARM_SPE_COMMON_DS_L2:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+ 		break;
+-	case ARM_SPE_NV_PEER_CORE:
++	case ARM_SPE_COMMON_DS_PEER_CORE:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
+ 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+@@ -460,8 +460,8 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec
+ 	 * We don't know if this is L1, L2 but we do know it was a cache-2-cache
+ 	 * transfer, so set SNOOPX_PEER
+ 	 */
+-	case ARM_SPE_NV_LOCAL_CLUSTER:
+-	case ARM_SPE_NV_PEER_CLUSTER:
++	case ARM_SPE_COMMON_DS_LOCAL_CLUSTER:
++	case ARM_SPE_COMMON_DS_PEER_CLUSTER:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+@@ -469,7 +469,7 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec
+ 	/*
+ 	 * System cache is assumed to be L3
+ 	 */
+-	case ARM_SPE_NV_SYS_CACHE:
++	case ARM_SPE_COMMON_DS_SYS_CACHE:
+ 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
+ 		data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
+@@ -478,13 +478,13 @@ static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *rec
+ 	 * We don't know what level it hit in, except it came from the other
+ 	 * socket
+ 	 */
+-	case ARM_SPE_NV_REMOTE:
+-		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
+-		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
++	case ARM_SPE_COMMON_DS_REMOTE:
++		data_src->mem_lvl = PERF_MEM_LVL_NA;
++		data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
+ 		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
+ 		break;
+-	case ARM_SPE_NV_DRAM:
++	case ARM_SPE_COMMON_DS_DRAM:
+ 		data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
+ 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+ 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
+@@ -514,13 +514,13 @@ static void arm_spe__synth_data_source_generic(const struct arm_spe_record *reco
+ 	}
+ 
+ 	if (record->type & ARM_SPE_REMOTE_ACCESS)
+-		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
++		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
+ }
+ 
+ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
+ {
+ 	union perf_mem_data_src	data_src = { .mem_op = PERF_MEM_OP_NA };
+-	bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
++	bool is_common = is_midr_in_range_list(midr, common_ds_encoding_cpus);
+ 
+ 	/* Only synthesize data source for LDST operations */
+ 	if (!is_ldst_op(record->op))
+@@ -533,8 +533,8 @@ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 m
+ 	else
+ 		return 0;
+ 
+-	if (is_neoverse)
+-		arm_spe__synth_data_source_neoverse(record, &data_src);
++	if (is_common)
++		arm_spe__synth_data_source_common(record, &data_src);
+ 	else
+ 		arm_spe__synth_data_source_generic(record, &data_src);
+ 
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 2a6295f1ac1bc0..6d1327f8c6043a 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -2874,7 +2874,7 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
+ 
+ 		/* If event has exclude user then don't exclude kernel. */
+ 		if (evsel->core.attr.exclude_user)
+-			return false;
++			goto no_fallback;
+ 
+ 		/* Is there already the separator in the name. */
+ 		if (strchr(name, '/') ||
+@@ -2882,7 +2882,7 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
+ 			sep = "";
+ 
+ 		if (asprintf(&new_name, "%s%su", name, sep) < 0)
+-			return false;
++			goto no_fallback;
+ 
+ 		free(evsel->name);
+ 		evsel->name = new_name;
+@@ -2893,8 +2893,31 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
+ 		evsel->core.attr.exclude_hv     = 1;
+ 
+ 		return true;
+-	}
++	} else if (err == EOPNOTSUPP && !evsel->core.attr.exclude_guest &&
++		   !evsel->exclude_GH) {
++		const char *name = evsel__name(evsel);
++		char *new_name;
++		const char *sep = ":";
++
++		/* Is there already the separator in the name. */
++		if (strchr(name, '/') ||
++		    (strchr(name, ':') && !evsel->is_libpfm_event))
++			sep = "";
++
++		if (asprintf(&new_name, "%s%sH", name, sep) < 0)
++			goto no_fallback;
+ 
++		free(evsel->name);
++		evsel->name = new_name;
++		/* Apple M1 requires exclude_guest */
++		scnprintf(msg, msgsize, "Trying to fall back to excluding guest samples");
++		evsel->core.attr.exclude_guest = 1;
++
++		return true;
++	}
++no_fallback:
++	scnprintf(msg, msgsize, "No fallback found for '%s' for error %d",
++		  evsel__name(evsel), err);
+ 	return false;
+ }
+ 
+@@ -3131,6 +3154,8 @@ bool evsel__is_hybrid(const struct evsel *evsel)
+ 
+ struct evsel *evsel__leader(const struct evsel *evsel)
+ {
++	if (evsel->core.leader == NULL)
++		return NULL;
+ 	return container_of(evsel->core.leader, struct evsel, core);
+ }
+ 
+diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c
+index af9a97612f9df3..f61574d1581e3c 100644
+--- a/tools/perf/util/lzma.c
++++ b/tools/perf/util/lzma.c
+@@ -113,7 +113,7 @@ bool lzma_is_compressed(const char *input)
+ 	ssize_t rc;
+ 
+ 	if (fd < 0)
+-		return -1;
++		return false;
+ 
+ 	rc = read(fd, buf, sizeof(buf));
+ 	close(fd);
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 277b2cbd518611..e4d57e7df7ae10 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -1662,7 +1662,7 @@ static s64 perf_session__process_user_event(struct perf_session *session,
+ 	struct perf_tool *tool = session->tool;
+ 	struct perf_sample sample = { .time = 0, };
+ 	int fd = perf_data__fd(session->data);
+-	int err;
++	s64 err;
+ 
+ 	if (event->header.type != PERF_RECORD_COMPRESSED ||
+ 	    tool->compressed == perf_session__process_compressed_event_stub)
+diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
+index e837132d5031bd..eecb462c021cd9 100644
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -1,6 +1,7 @@
+ from os import getenv, path
+ from subprocess import Popen, PIPE
+ from re import sub
++import shlex
+ 
+ cc = getenv("CC")
+ 
+@@ -16,7 +17,9 @@ cc_is_clang = b"clang version" in Popen([cc, "-v"], stderr=PIPE).stderr.readline
+ src_feature_tests  = getenv('srctree') + '/tools/build/feature'
+ 
+ def clang_has_option(option):
+-    cc_output = Popen([cc, cc_options + option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
++    cmd = shlex.split(f"{cc} {cc_options} {option}")
++    cmd.append(path.join(src_feature_tests, "test-hello.c"))
++    cc_output = Popen(cmd, stderr=PIPE).stderr.readlines()
+     return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o) or (b"unknown warning option" in o))] == [ ]
+ 
+ if cc_is_clang:
+diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
+index 78d2297c1b6746..1f7c065230599d 100644
+--- a/tools/perf/util/zlib.c
++++ b/tools/perf/util/zlib.c
+@@ -88,7 +88,7 @@ bool gzip_is_compressed(const char *input)
+ 	ssize_t rc;
+ 
+ 	if (fd < 0)
+-		return -1;
++		return false;
+ 
+ 	rc = read(fd, buf, sizeof(buf));
+ 	close(fd);
+diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c
+index 17bcb07f19f349..7278623acf8c54 100644
+--- a/tools/testing/selftests/mm/madv_populate.c
++++ b/tools/testing/selftests/mm/madv_populate.c
+@@ -264,23 +264,6 @@ static void test_softdirty(void)
+ 	munmap(addr, SIZE);
+ }
+ 
+-static int system_has_softdirty(void)
+-{
+-	/*
+-	 * There is no way to check if the kernel supports soft-dirty, other
+-	 * than by writing to a page and seeing if the bit was set. But the
+-	 * tests are intended to check that the bit gets set when it should, so
+-	 * doing that check would turn a potentially legitimate fail into a
+-	 * skip. Fortunately, we know for sure that arm64 does not support
+-	 * soft-dirty. So for now, let's just use the arch as a corse guide.
+-	 */
+-#if defined(__aarch64__)
+-	return 0;
+-#else
+-	return 1;
+-#endif
+-}
+-
+ int main(int argc, char **argv)
+ {
+ 	int nr_tests = 16;
+@@ -288,7 +271,7 @@ int main(int argc, char **argv)
+ 
+ 	pagesize = getpagesize();
+ 
+-	if (system_has_softdirty())
++	if (softdirty_supported())
+ 		nr_tests += 5;
+ 
+ 	ksft_print_header();
+@@ -300,7 +283,7 @@ int main(int argc, char **argv)
+ 	test_holes();
+ 	test_populate_read();
+ 	test_populate_write();
+-	if (system_has_softdirty())
++	if (softdirty_supported())
+ 		test_softdirty();
+ 
+ 	err = ksft_get_fail_cnt();
+diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
+index 7dbfa53d93a05f..7095b95d19ae2d 100644
+--- a/tools/testing/selftests/mm/soft-dirty.c
++++ b/tools/testing/selftests/mm/soft-dirty.c
+@@ -193,8 +193,11 @@ int main(int argc, char **argv)
+ 	int pagesize;
+ 
+ 	ksft_print_header();
+-	ksft_set_plan(15);
+ 
++	if (!softdirty_supported())
++		ksft_exit_skip("soft-dirty is not supported\n");
++
++	ksft_set_plan(15);
+ 	pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
+ 	if (pagemap_fd < 0)
+ 		ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);
+diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c
+index 558c9cd8901c5a..b2af70d75711aa 100644
+--- a/tools/testing/selftests/mm/vm_util.c
++++ b/tools/testing/selftests/mm/vm_util.c
+@@ -97,6 +97,42 @@ uint64_t read_pmd_pagesize(void)
+ 	return strtoul(buf, NULL, 10);
+ }
+ 
++char *__get_smap_entry(void *addr, const char *pattern, char *buf, size_t len)
++{
++	int ret;
++	FILE *fp;
++	char *entry = NULL;
++	char addr_pattern[MAX_LINE_LENGTH];
++
++	ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
++		       (unsigned long)addr);
++	if (ret >= MAX_LINE_LENGTH)
++		ksft_exit_fail_msg("%s: Pattern is too long\n", __func__);
++
++	fp = fopen(SMAP_FILE_PATH, "r");
++	if (!fp)
++		ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__,
++				   SMAP_FILE_PATH);
++
++	if (!check_for_pattern(fp, addr_pattern, buf, len))
++		goto err_out;
++
++	/* Fetch the pattern in the same block */
++	if (!check_for_pattern(fp, pattern, buf, len))
++		goto err_out;
++
++	/* Trim trailing newline */
++	entry = strchr(buf, '\n');
++	if (entry)
++		*entry = '\0';
++
++	entry = buf + strlen(pattern);
++
++err_out:
++	fclose(fp);
++	return entry;
++}
++
+ bool __check_huge(void *addr, char *pattern, int nr_hpages,
+ 		  uint64_t hpage_size)
+ {
+@@ -269,3 +305,44 @@ int uffd_unregister(int uffd, void *addr, uint64_t len)
+ 
+ 	return ret;
+ }
++
++static bool check_vmflag(void *addr, const char *flag)
++{
++	char buffer[MAX_LINE_LENGTH];
++	const char *flags;
++	size_t flaglen;
++
++	flags = __get_smap_entry(addr, "VmFlags:", buffer, sizeof(buffer));
++	if (!flags)
++		ksft_exit_fail_msg("%s: No VmFlags for %p\n", __func__, addr);
++
++	while (true) {
++		flags += strspn(flags, " ");
++
++		flaglen = strcspn(flags, " ");
++		if (!flaglen)
++			return false;
++
++		if (flaglen == strlen(flag) && !memcmp(flags, flag, flaglen))
++			return true;
++
++		flags += flaglen;
++	}
++}
++
++bool softdirty_supported(void)
++{
++	char *addr;
++	bool supported = false;
++	const size_t pagesize = getpagesize();
++
++	/* New mappings are expected to be marked with VM_SOFTDIRTY (sd). */
++	addr = mmap(0, pagesize, PROT_READ | PROT_WRITE,
++		    MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
++	if (!addr)
++		ksft_exit_fail_msg("mmap failed\n");
++
++	supported = check_vmflag(addr, "sd");
++	munmap(addr, pagesize);
++	return supported;
++}
+diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
+index 0c603bec5e209c..9816d6e9bce6c9 100644
+--- a/tools/testing/selftests/mm/vm_util.h
++++ b/tools/testing/selftests/mm/vm_util.h
+@@ -51,6 +51,7 @@ int uffd_register(int uffd, void *addr, uint64_t len,
+ int uffd_unregister(int uffd, void *addr, uint64_t len);
+ int uffd_register_with_ioctls(int uffd, void *addr, uint64_t len,
+ 			      bool miss, bool wp, bool minor, uint64_t *ioctls);
++bool softdirty_supported(void);
+ 
+ /*
+  * On ppc64 this will only work with radix 2M hugepage size
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 442b7220468afc..9a907d8260c9c7 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3234,6 +3234,17 @@ deny_join_id0_tests()
+ 		run_tests $ns1 $ns2 10.0.1.1
+ 		chk_join_nr 1 1 1
+ 	fi
++
++	# default limits, server deny join id 0 + signal
++	if reset_with_allow_join_id0 "default limits, server deny join id 0" 0 1; then
++		pm_nl_set_limits $ns1 0 2
++		pm_nl_set_limits $ns2 0 2
++		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
++		pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
++		pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
++		run_tests $ns1 $ns2 10.0.1.1
++		chk_join_nr 2 2 2
++	fi
+ }
+ 
+ fullmesh_tests()
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index f6156790c3b4df..05dc77fd527b3b 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -40,9 +40,9 @@
+  * Define weak versions to play nice with binaries that are statically linked
+  * against a libc that doesn't support registering its own rseq.
+  */
+-__weak ptrdiff_t __rseq_offset;
+-__weak unsigned int __rseq_size;
+-__weak unsigned int __rseq_flags;
++extern __weak ptrdiff_t __rseq_offset;
++extern __weak unsigned int __rseq_size;
++extern __weak unsigned int __rseq_flags;
+ 
+ static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset;
+ static const unsigned int *libc_rseq_size_p = &__rseq_size;
+@@ -198,7 +198,7 @@ void rseq_init(void)
+ 	 * libc not having registered a restartable sequence.  Try to find the
+ 	 * symbols if that's the case.
+ 	 */
+-	if (!*libc_rseq_size_p) {
++	if (!libc_rseq_size_p || !*libc_rseq_size_p) {
+ 		libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
+ 		libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
+ 		libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");

